python_code | repo_name | file_path
---|---|---|
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "smuio_v13_0_3.h"
#include "soc15_common.h"
#include "smuio/smuio_13_0_3_offset.h"
#include "smuio/smuio_13_0_3_sh_mask.h"
#define PKG_TYPE_MASK 0x00000003L
/**
* smuio_v13_0_3_get_die_id - query die id from FCH.
*
* @adev: amdgpu device pointer
*
* Returns die id
*/
static u32 smuio_v13_0_3_get_die_id(struct amdgpu_device *adev)
{
u32 data, die_id;
data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID);
return die_id;
}
/**
* smuio_v13_0_3_get_socket_id - query socket id from FCH
*
* @adev: amdgpu device pointer
*
* Returns socket id
*/
static u32 smuio_v13_0_3_get_socket_id(struct amdgpu_device *adev)
{
u32 data, socket_id;
data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
socket_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, SOCKET_ID);
return socket_id;
}
/**
* smuio_v13_0_3_get_pkg_type - query package type set by MP1/bootcode
*
* @adev: amdgpu device pointer
*
* Returns package type
*/
static enum amdgpu_pkg_type smuio_v13_0_3_get_pkg_type(struct amdgpu_device *adev)
{
enum amdgpu_pkg_type pkg_type;
u32 data;
data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, PKG_TYPE);
/* pkg_type[4:0]
*
* bit 1 == 1 APU form factor
*
* b0100 - b1111 - Reserved
*/
switch (data & PKG_TYPE_MASK) {
case 0x2:
pkg_type = AMDGPU_PKG_TYPE_APU;
break;
default:
pkg_type = AMDGPU_PKG_TYPE_UNKNOWN;
break;
}
return pkg_type;
}
const struct amdgpu_smuio_funcs smuio_v13_0_3_funcs = {
.get_die_id = smuio_v13_0_3_get_die_id,
.get_socket_id = smuio_v13_0_3_get_socket_id,
.get_pkg_type = smuio_v13_0_3_get_pkg_type,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "v11_structs.h"
#include "mes_v11_api_def.h"
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes_2.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mes1.bin");
static int mes_v11_0_hw_fini(void *handle);
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr);
WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
BUG();
}
}
static u64 mes_v11_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
static u64 mes_v11_0_ring_get_wptr(struct amdgpu_ring *ring)
{
u64 wptr;
if (ring->use_doorbell)
wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
else
BUG();
return wptr;
}
static const struct amdgpu_ring_funcs mes_v11_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_MES,
.align_mask = 1,
.nop = 0,
.support_64bit_ptrs = true,
.get_rptr = mes_v11_0_ring_get_rptr,
.get_wptr = mes_v11_0_ring_get_wptr,
.set_wptr = mes_v11_0_ring_set_wptr,
.insert_nop = amdgpu_ring_insert_nop,
};
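/*
 * Write a MES API packet to the scheduler ring and poll the packet's
 * API status fence until the firmware signals completion or the
 * timeout expires.
 */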
static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
void *pkt, int size,
int api_status_off)
{
int ndw = size / 4;
signed long r;
union MESAPI__ADD_QUEUE *x_pkt = pkt;
struct MES_API_STATUS *api_status;
struct amdgpu_device *adev = mes->adev;
struct amdgpu_ring *ring = &mes->ring;
unsigned long flags;
signed long timeout = adev->usec_timeout;
if (amdgpu_emu_mode) {
timeout *= 100;
} else if (amdgpu_sriov_vf(adev)) {
/* Worst case in SR-IOV where all of the other 15 VFs time out, each VF needs about 600ms */
timeout = 15 * 600 * 1000;
}
BUG_ON(size % 4 != 0);
spin_lock_irqsave(&mes->ring_lock, flags);
if (amdgpu_ring_alloc(ring, ndw)) {
spin_unlock_irqrestore(&mes->ring_lock, flags);
return -ENOMEM;
}
api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
api_status->api_completion_fence_addr = mes->ring.fence_drv.gpu_addr;
api_status->api_completion_fence_value = ++mes->ring.fence_drv.sync_seq;
amdgpu_ring_write_multiple(ring, pkt, ndw);
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&mes->ring_lock, flags);
DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);
r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
timeout);
if (r < 1) {
DRM_ERROR("MES failed to response msg=%d\n",
x_pkt->header.opcode);
while (halt_if_hws_hang)
schedule();
return -ETIMEDOUT;
}
return 0;
}
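/* Map an amdgpu ring type onto the corresponding MES queue type. */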
static int convert_to_mes_queue_type(int queue_type)
{
if (queue_type == AMDGPU_RING_TYPE_GFX)
return MES_QUEUE_TYPE_GFX;
else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
return MES_QUEUE_TYPE_COMPUTE;
else if (queue_type == AMDGPU_RING_TYPE_SDMA)
return MES_QUEUE_TYPE_SDMA;
else
BUG();
return -1;
}
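/*
 * Build an ADD_QUEUE API packet from the input parameters and submit it
 * to the MES scheduler to map a new hardware queue.
 */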
static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
struct mes_add_queue_input *input)
{
struct amdgpu_device *adev = mes->adev;
union MESAPI__ADD_QUEUE mes_add_queue_pkt;
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;
memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));
mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_add_queue_pkt.process_id = input->process_id;
mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
mes_add_queue_pkt.process_va_start = input->process_va_start;
mes_add_queue_pkt.process_va_end = input->process_va_end;
mes_add_queue_pkt.process_quantum = input->process_quantum;
mes_add_queue_pkt.process_context_addr = input->process_context_addr;
mes_add_queue_pkt.gang_quantum = input->gang_quantum;
mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
mes_add_queue_pkt.inprocess_gang_priority =
input->inprocess_gang_priority;
mes_add_queue_pkt.gang_global_priority_level =
input->gang_global_priority_level;
mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_add_queue_pkt.mqd_addr = input->mqd_addr;
if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
AMDGPU_MES_API_VERSION_SHIFT) >= 2)
mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr;
else
mes_add_queue_pkt.wptr_addr = input->wptr_addr;
mes_add_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
mes_add_queue_pkt.paging = input->paging;
mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
mes_add_queue_pkt.gws_base = input->gws_base;
mes_add_queue_pkt.gws_size = input->gws_size;
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.trap_en = input->trap_en;
mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
mes_add_queue_pkt.gds_size = input->queue_size;
mes_add_queue_pkt.exclusively_scheduled = input->exclusively_scheduled;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
}
static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
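/*
 * Unmap (or preempt without unmapping) a legacy, driver-mapped kernel
 * queue by submitting a REMOVE_QUEUE packet.
 */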
static int mes_v11_0_unmap_legacy_queue(struct amdgpu_mes *mes,
struct mes_unmap_legacy_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = 0;
mes_remove_queue_pkt.pipe_id = input->pipe_id;
mes_remove_queue_pkt.queue_id = input->queue_id;
if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
mes_remove_queue_pkt.tf_data =
lower_32_bits(input->trail_fence_data);
} else {
mes_remove_queue_pkt.unmap_legacy_queue = 1;
mes_remove_queue_pkt.queue_type =
convert_to_mes_queue_type(input->queue_type);
}
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
static int mes_v11_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
return 0;
}
static int mes_v11_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
return 0;
}
static int mes_v11_0_query_sched_status(struct amdgpu_mes *mes)
{
union MESAPI__QUERY_MES_STATUS mes_status_pkt;
memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_status_pkt, sizeof(mes_status_pkt),
offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}
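/*
 * Submit a MISC API packet for register reads/writes, wait-reg-mem
 * operations and shader debugger setup.
 */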
static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
struct mes_misc_op_input *input)
{
union MESAPI__MISC misc_pkt;
memset(&misc_pkt, 0, sizeof(misc_pkt));
misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
misc_pkt.header.opcode = MES_SCH_API_MISC;
misc_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
switch (input->op) {
case MES_MISC_OP_READ_REG:
misc_pkt.opcode = MESAPI_MISC__READ_REG;
misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset;
misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr;
break;
case MES_MISC_OP_WRITE_REG:
misc_pkt.opcode = MESAPI_MISC__WRITE_REG;
misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset;
misc_pkt.write_reg.reg_value = input->write_reg.reg_value;
break;
case MES_MISC_OP_WRM_REG_WAIT:
misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
misc_pkt.wait_reg_mem.op = WRM_OPERATION__WAIT_REG_MEM;
misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
misc_pkt.wait_reg_mem.reg_offset2 = 0;
break;
case MES_MISC_OP_WRM_REG_WR_WAIT:
misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
misc_pkt.wait_reg_mem.op = WRM_OPERATION__WR_WAIT_WR_REG;
misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
break;
case MES_MISC_OP_SET_SHADER_DEBUGGER:
misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
misc_pkt.set_shader_debugger.process_context_addr =
input->set_shader_debugger.process_context_addr;
misc_pkt.set_shader_debugger.flags.u32all =
input->set_shader_debugger.flags.u32all;
misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
input->set_shader_debugger.tcp_watch_cntl,
sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
break;
default:
DRM_ERROR("unsupported misc op (%d) \n", input->op);
return -EINVAL;
}
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&misc_pkt, sizeof(misc_pkt),
offsetof(union MESAPI__MISC, api_status));
}
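/*
 * Report the VMID masks, HQD/SDMA queue masks, aggregated doorbells and
 * register apertures that the MES firmware is allowed to manage.
 */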
static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
{
int i;
struct amdgpu_device *adev = mes->adev;
union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;
memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
mes_set_hw_res_pkt.paging_vmid = 0;
mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
mes->query_status_fence_gpu_addr;
for (i = 0; i < MAX_COMPUTE_PIPES; i++)
mes_set_hw_res_pkt.compute_hqd_mask[i] =
mes->compute_hqd_mask[i];
for (i = 0; i < MAX_GFX_PIPES; i++)
mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
for (i = 0; i < MAX_SDMA_PIPES; i++)
mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
mes_set_hw_res_pkt.aggregated_doorbells[i] =
mes->aggregated_doorbells[i];
for (i = 0; i < 5; i++) {
mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
mes_set_hw_res_pkt.mmhub_base[i] =
adev->reg_offset[MMHUB_HWIP][0][i];
mes_set_hw_res_pkt.osssys_base[i] =
adev->reg_offset[OSSSYS_HWIP][0][i];
}
mes_set_hw_res_pkt.disable_reset = 1;
mes_set_hw_res_pkt.disable_mes_log = 1;
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}
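/*
 * Program the per-priority aggregated doorbell offsets into
 * CP_MES_DOORBELL_CONTROL1..5 and enable them.
 */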
static void mes_v11_0_init_aggregated_doorbell(struct amdgpu_mes *mes)
{
struct amdgpu_device *adev = mes->adev;
uint32_t data;
data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1);
data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1, data);
data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2);
data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2, data);
data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3);
data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3, data);
data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4);
data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK |
CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK |
CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK);
data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] <<
CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT;
data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT;
WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4, data);
data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5);
data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK |
CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK |
CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK);
data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] <<
CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT;
data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT;
WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5, data);
data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT;
WREG32_SOC15(GC, 0, regCP_HQD_GFX_CONTROL, data);
}
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
.unmap_legacy_queue = mes_v11_0_unmap_legacy_queue,
.suspend_gang = mes_v11_0_suspend_gang,
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
};
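/*
 * Copy the MES instruction ucode from the firmware image into a GPU
 * buffer object for backdoor loading.
 */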
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
const __le32 *fw_data;
unsigned fw_size;
mes_hdr = (const struct mes_firmware_header_v1_0 *)
adev->mes.fw[pipe]->data;
fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.ucode_fw_obj[pipe],
&adev->mes.ucode_fw_gpu_addr[pipe],
(void **)&adev->mes.ucode_fw_ptr[pipe]);
if (r) {
dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
return r;
}
memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);
amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);
return 0;
}
static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
int r;
const struct mes_firmware_header_v1_0 *mes_hdr;
const __le32 *fw_data;
unsigned fw_size;
mes_hdr = (const struct mes_firmware_header_v1_0 *)
adev->mes.fw[pipe]->data;
fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
64 * 1024,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
(void **)&adev->mes.data_fw_ptr[pipe]);
if (r) {
dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
return r;
}
memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);
amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);
return 0;
}
static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
(void **)&adev->mes.data_fw_ptr[pipe]);
amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
&adev->mes.ucode_fw_gpu_addr[pipe],
(void **)&adev->mes.ucode_fw_ptr[pipe]);
}
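/*
 * Reset the MES pipes, program the ucode start addresses and activate
 * the pipes (enable), or halt and reset them (disable).
 */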
static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
{
uint64_t ucode_addr;
uint32_t pipe, data = 0;
if (enable) {
data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL,
MES_PIPE1_RESET, adev->enable_mes_kiq ? 1 : 0);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
mutex_lock(&adev->srbm_mutex);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq &&
pipe == AMDGPU_MES_KIQ_PIPE)
continue;
soc21_grbm_select(adev, 3, pipe, 0, 0);
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
lower_32_bits(ucode_addr));
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
upper_32_bits(ucode_addr));
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* unhalt MES and activate pipe0 */
data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE,
adev->enable_mes_kiq ? 1 : 0);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
if (amdgpu_emu_mode)
msleep(100);
else
udelay(50);
} else {
data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
data = REG_SET_FIELD(data, CP_MES_CNTL,
MES_INVALIDATE_ICACHE, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET,
adev->enable_mes_kiq ? 1 : 0);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
}
}
/* This function is for backdoor loading of the MES firmware */
static int mes_v11_0_load_microcode(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe, bool prime_icache)
{
int r;
uint32_t data;
uint64_t ucode_addr;
mes_v11_0_enable(adev, false);
if (!adev->mes.fw[pipe])
return -EINVAL;
r = mes_v11_0_allocate_ucode_buffer(adev, pipe);
if (r)
return r;
r = mes_v11_0_allocate_ucode_data_buffer(adev, pipe);
if (r) {
mes_v11_0_free_ucode_buffers(adev, pipe);
return r;
}
mutex_lock(&adev->srbm_mutex);
/* me=3, pipe=0, queue=0 */
soc21_grbm_select(adev, 3, pipe, 0, 0);
WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0);
/* set ucode start address */
ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
lower_32_bits(ucode_addr));
WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
upper_32_bits(ucode_addr));
/* set ucode firmware address */
WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO,
lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI,
upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
/* set ucode instruction cache boundary to 2M-1 */
WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF);
/* set ucode data firmware address */
WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO,
lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
/* write 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */
WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF);
if (prime_icache) {
/* invalidate ICACHE */
data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
/* prime the ICACHE. */
data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
}
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
return 0;
}
static int mes_v11_0_allocate_eop_buf(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
int r;
u32 *eop;
r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
(void **)&eop);
if (r) {
dev_warn(adev->dev, "(%d) create EOP bo failed\n", r);
return r;
}
memset(eop, 0,
adev->mes.eop_gpu_obj[pipe]->tbo.base.size);
amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);
return 0;
}
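/* Initialize the v11 compute MQD used for the MES scheduler/KIQ ring. */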
static int mes_v11_0_mqd_init(struct amdgpu_ring *ring)
{
struct v11_compute_mqd *mqd = ring->mqd_ptr;
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
memset(mqd, 0, sizeof(*mqd));
mqd->header = 0xC0310800;
mqd->compute_pipelinestat_enable = 0x00000001;
mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000007;
eop_base_addr = ring->eop_gpu_addr >> 8;
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(MES_EOP_SIZE / 4) - 1));
mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
mqd->cp_hqd_eop_control = tmp;
/* disable the queue if it's active */
ring->wptr = 0;
mqd->cp_hqd_pq_rptr = 0;
mqd->cp_hqd_pq_wptr_lo = 0;
mqd->cp_hqd_pq_wptr_hi = 0;
/* set the pointer to the MQD */
mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
/* set MQD vmid to 0 */
tmp = regCP_MQD_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
hqd_gpu_addr = ring->gpu_addr >> 8;
mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set the wb address whether it's enabled or not */
wb_gpu_addr = ring->rptr_gpu_addr;
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
upper_32_bits(wb_gpu_addr) & 0xffff;
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
wb_gpu_addr = ring->wptr_gpu_addr;
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
/* set up the HQD, this is similar to CP_RB0_CNTL */
tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
mqd->cp_hqd_pq_control = tmp;
/* enable doorbell */
tmp = 0;
if (ring->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
} else
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
mqd->cp_hqd_pq_doorbell_control = tmp;
mqd->cp_hqd_vmid = 0;
/* activate the queue */
mqd->cp_hqd_active = 1;
tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE,
PRELOAD_SIZE, 0x55);
mqd->cp_hqd_persistent_state = tmp;
mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT;
mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;
amdgpu_device_flush_hdp(ring->adev, NULL);
return 0;
}
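/*
 * Program the HQD registers directly from the MQD contents; used for the
 * KIQ pipe, which is not mapped through the KIQ itself.
 */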
static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
{
struct v11_compute_mqd *mqd = ring->mqd_ptr;
struct amdgpu_device *adev = ring->adev;
uint32_t data = 0;
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, 3, ring->pipe, 0, 0);
/* set CP_HQD_VMID.VMID = 0. */
data = RREG32_SOC15(GC, 0, regCP_HQD_VMID);
data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
WREG32_SOC15(GC, 0, regCP_HQD_VMID, data);
/* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
/* set CP_MQD_BASE_ADDR/HI with the MQD base address */
WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
/* set CP_MQD_CONTROL.VMID=0 */
data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 0);
/* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);
/* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
mqd->cp_hqd_pq_rptr_report_addr_lo);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
mqd->cp_hqd_pq_rptr_report_addr_hi);
/* set CP_HQD_PQ_CONTROL */
WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);
/* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
mqd->cp_hqd_pq_wptr_poll_addr_lo);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
mqd->cp_hqd_pq_wptr_poll_addr_hi);
/* set CP_HQD_PQ_DOORBELL_CONTROL */
WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x55 */
WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);
/* set CP_HQD_ACTIVE.ACTIVE=1 */
WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active);
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
return -EINVAL;
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
return r;
}
kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
return amdgpu_ring_test_helper(kiq_ring);
}
static int mes_v11_0_queue_init(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
struct amdgpu_ring *ring;
int r;
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
BUG();
if ((pipe == AMDGPU_MES_SCHED_PIPE) &&
(amdgpu_in_reset(adev) || adev->in_suspend)) {
*(ring->wptr_cpu_addr) = 0;
*(ring->rptr_cpu_addr) = 0;
amdgpu_ring_clear_ring(ring);
}
r = mes_v11_0_mqd_init(ring);
if (r)
return r;
if (pipe == AMDGPU_MES_SCHED_PIPE) {
r = mes_v11_0_kiq_enable_queue(adev);
if (r)
return r;
} else {
mes_v11_0_queue_init_register(ring);
}
/* get MES scheduler/KIQ versions */
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, 3, pipe, 0, 0);
if (pipe == AMDGPU_MES_SCHED_PIPE)
adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
return 0;
}
static int mes_v11_0_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
ring = &adev->mes.ring;
ring->funcs = &mes_v11_0_ring_funcs;
ring->me = 3;
ring->pipe = 0;
ring->queue = 0;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_SCHED_PIPE];
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
}
static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
spin_lock_init(&adev->gfx.kiq[0].ring_lock);
ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
ring->pipe = 1;
ring->queue = 0;
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;
ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE];
ring->no_scheduler = true;
sprintf(ring->name, "mes_kiq_%d.%d.%d",
ring->me, ring->pipe, ring->queue);
return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
}
static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
int r, mqd_size = sizeof(struct v11_compute_mqd);
struct amdgpu_ring *ring;
if (pipe == AMDGPU_MES_KIQ_PIPE)
ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
BUG();
if (ring->mqd_obj)
return 0;
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
memset(ring->mqd_ptr, 0, mqd_size);
/* prepare MQD backup */
adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
if (!adev->mes.mqd_backup[pipe]) {
dev_warn(adev->dev,
"no memory to create MQD backup for ring %s\n",
ring->name);
return -ENOMEM;
}
return 0;
}
static int mes_v11_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
adev->mes.funcs = &mes_v11_0_funcs;
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
r = amdgpu_mes_init(adev);
if (r)
return r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
r = mes_v11_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
r = mes_v11_0_mqd_sw_init(adev, pipe);
if (r)
return r;
}
if (adev->enable_mes_kiq) {
r = mes_v11_0_kiq_ring_init(adev);
if (r)
return r;
}
r = mes_v11_0_ring_init(adev);
if (r)
return r;
return 0;
}
static int mes_v11_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe;
amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
kfree(adev->mes.mqd_backup[pipe]);
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
&adev->gfx.kiq[0].ring.mqd_gpu_addr,
&adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
}
amdgpu_mes_fini(adev);
return 0;
}
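/*
 * Deactivate the HQD behind the given MES/KIQ ring and clear its
 * doorbell control and queue pointers.
 */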
static void mes_v11_0_kiq_dequeue(struct amdgpu_ring *ring)
{
uint32_t data;
int i;
struct amdgpu_device *adev = ring->adev;
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, 3, ring->pipe, 0, 0);
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
}
data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 1);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
uint32_t tmp;
struct amdgpu_device *adev = ring->adev;
/* tell RLC which queue is the KIQ queue */
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80;
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}
static void mes_v11_0_kiq_clear(struct amdgpu_device *adev)
{
uint32_t tmp;
/* tell RLC the KIQ queue has been dequeued */
tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= ~RLC_CP_SCHEDULERS__scheduler0_MASK;
WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}
static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
{
int r = 0;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
if (r) {
DRM_ERROR("failed to load MES fw, r=%d\n", r);
return r;
}
r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
if (r) {
DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
return r;
}
}
mes_v11_0_enable(adev, true);
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
return r;
failure:
mes_v11_0_hw_fini(adev);
return r;
}
static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
{
if (adev->mes.ring.sched.ready) {
mes_v11_0_kiq_dequeue(&adev->mes.ring);
adev->mes.ring.sched.ready = false;
}
if (amdgpu_sriov_vf(adev)) {
mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
mes_v11_0_kiq_clear(adev);
}
mes_v11_0_enable(adev, false);
return 0;
}
static int mes_v11_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->enable_mes_kiq) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = mes_v11_0_load_microcode(adev,
AMDGPU_MES_SCHED_PIPE, true);
if (r) {
DRM_ERROR("failed to MES fw, r=%d\n", r);
return r;
}
}
mes_v11_0_enable(adev, true);
}
r = mes_v11_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
if (r)
goto failure;
r = mes_v11_0_set_hw_resources(&adev->mes);
if (r)
goto failure;
mes_v11_0_init_aggregated_doorbell(&adev->mes);
r = mes_v11_0_query_sched_status(&adev->mes);
if (r) {
DRM_ERROR("MES is busy\n");
goto failure;
}
/*
* Disable KIQ ring usage from the driver once MES is enabled.
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
return 0;
failure:
mes_v11_0_hw_fini(adev);
return r;
}
static int mes_v11_0_hw_fini(void *handle)
{
return 0;
}
static int mes_v11_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_mes_suspend(adev);
if (r)
return r;
return mes_v11_0_hw_fini(adev);
}
static int mes_v11_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = mes_v11_0_hw_init(adev);
if (r)
return r;
return amdgpu_mes_resume(adev);
}
static int mes_v11_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
return r;
}
return 0;
}
static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
return 0;
}
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
.early_init = mes_v11_0_early_init,
.late_init = mes_v11_0_late_init,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
.hw_init = mes_v11_0_hw_init,
.hw_fini = mes_v11_0_hw_fini,
.suspend = mes_v11_0_suspend,
.resume = mes_v11_0_resume,
};
const struct amdgpu_ip_block_version mes_v11_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_MES,
.major = 11,
.minor = 0,
.rev = 0,
.funcs = &mes_v11_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/mes_v11_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "bif/bif_5_1_sh_mask.h"
/*
* Interrupts
* Starting with r6xx, interrupts are handled via a ring buffer.
* Ring buffers are areas of GPU accessible memory that the GPU
* writes interrupt vectors into and the host reads vectors out of.
* There is a rptr (read pointer) that determines where the
* host is currently reading, and a wptr (write pointer)
* which determines where the GPU has written. When the
* pointers are equal, the ring is idle. When the GPU
* writes vectors to the ring buffer, it increments the
* wptr. When there is an interrupt, the host then starts
* fetching commands and processing them until the pointers are
* equal again at which point it updates the rptr.
*/
static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
* tonga_ih_enable_interrupts - Enable the interrupt ring buffer
*
* @adev: amdgpu_device pointer
*
* Enable the interrupt ring buffer (VI).
*/
static void tonga_ih_enable_interrupts(struct amdgpu_device *adev)
{
u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
adev->irq.ih.enabled = true;
}
/**
* tonga_ih_disable_interrupts - Disable the interrupt ring buffer
*
* @adev: amdgpu_device pointer
*
* Disable the interrupt ring buffer (VI).
*/
static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
{
u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
/* set rptr, wptr to 0 */
WREG32(mmIH_RB_RPTR, 0);
WREG32(mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
}
/**
* tonga_ih_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
*
* Allocate a ring buffer for the interrupt controller,
* enable the RLC, disable interrupts, enable the IH
* ring buffer and enable it (VI).
* Called at device load and resume.
* Returns 0 for success, errors for failure.
*/
static int tonga_ih_irq_init(struct amdgpu_device *adev)
{
u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
struct amdgpu_ih_ring *ih = &adev->irq.ih;
int rb_bufsz;
/* disable irqs */
tonga_ih_disable_interrupts(adev);
/* setup interrupt control */
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32(mmIH_RB_BASE, ih->gpu_addr >> 8);
rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
if (adev->irq.msi_enabled)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
/* set the writeback address whether it's enabled or not */
WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
/* set rptr, wptr to 0 */
WREG32(mmIH_RB_RPTR, 0);
WREG32(mmIH_RB_WPTR, 0);
ih_doorbell_rtpr = RREG32(mmIH_DOORBELL_RPTR);
if (adev->irq.ih.use_doorbell) {
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
OFFSET, adev->irq.ih.doorbell_index);
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
ENABLE, 1);
} else {
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
ENABLE, 0);
}
WREG32(mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
pci_set_master(adev->pdev);
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
return 0;
}
/**
* tonga_ih_irq_disable - disable interrupts
*
* @adev: amdgpu_device pointer
*
* Disable interrupts on the hw (VI).
*/
static void tonga_ih_irq_disable(struct amdgpu_device *adev)
{
tonga_ih_disable_interrupts(adev);
/* Wait and acknowledge irq */
mdelay(1);
}
/**
* tonga_ih_get_wptr - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to fetch wptr
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer (VI). Also check for
* ring buffer overflow and deal with it.
* Used by cz_irq_process(VI).
* Returns the value of the wptr.
*/
static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
wptr = le32_to_cpu(*ih->wptr_cpu);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32(mmIH_RB_WPTR);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
/* When a ring buffer overflow happens, start parsing interrupts
 * from the last not-overwritten vector (wptr + 16). Hopefully
 * this should allow us to catch up.
*/
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to decode
* @entry: IV entry to place decoded information into
*
* Decodes the interrupt vector at the current rptr
* position and also advance the position.
*/
static void tonga_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
entry->vmid = (dw[2] >> 8) & 0xff;
entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
ih->rptr += 16;
}
/**
* tonga_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to set rptr
*
* Set the IH ring buffer rptr.
*/
static void tonga_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
} else {
WREG32(mmIH_RB_RPTR, ih->rptr);
}
}
static int tonga_ih_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = amdgpu_irq_add_domain(adev);
if (ret)
return ret;
tonga_ih_set_interrupt_funcs(adev);
return 0;
}
static int tonga_ih_sw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true);
if (r)
return r;
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
r = amdgpu_irq_init(adev);
return r;
}
static int tonga_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
}
static int tonga_ih_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = tonga_ih_irq_init(adev);
if (r)
return r;
return 0;
}
static int tonga_ih_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
tonga_ih_irq_disable(adev);
return 0;
}
static int tonga_ih_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return tonga_ih_hw_fini(adev);
}
static int tonga_ih_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return tonga_ih_hw_init(adev);
}
static bool tonga_ih_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
return false;
return true;
}
static int tonga_ih_wait_for_idle(void *handle)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(mmSRBM_STATUS);
if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static bool tonga_ih_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
adev->irq.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->irq.srbm_soft_reset = 0;
return false;
}
}
static int tonga_ih_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_fini(adev);
}
static int tonga_ih_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_init(adev);
}
static int tonga_ih_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
if (!adev->irq.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->irq.srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int tonga_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int tonga_ih_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init,
.late_init = NULL,
.sw_init = tonga_ih_sw_init,
.sw_fini = tonga_ih_sw_fini,
.hw_init = tonga_ih_hw_init,
.hw_fini = tonga_ih_hw_fini,
.suspend = tonga_ih_suspend,
.resume = tonga_ih_resume,
.is_idle = tonga_ih_is_idle,
.wait_for_idle = tonga_ih_wait_for_idle,
.check_soft_reset = tonga_ih_check_soft_reset,
.pre_soft_reset = tonga_ih_pre_soft_reset,
.soft_reset = tonga_ih_soft_reset,
.post_soft_reset = tonga_ih_post_soft_reset,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
};
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
adev->irq.ih_funcs = &tonga_ih_funcs;
}
const struct amdgpu_ip_block_version tonga_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 3,
.minor = 0,
.rev = 0,
.funcs = &tonga_ih_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/tonga_ih.c |
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Jerome Glisse <[email protected]>
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
/**
* DOC: amdgpu_object
*
* This defines the interfaces to operate on an &amdgpu_bo buffer object which
* represents memory used by driver (VRAM, system memory, etc.). The driver
* provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
* to create/destroy/set buffer object which are then managed by the kernel TTM
* memory manager.
* The interfaces are also used internally by kernel clients, including gfx,
* uvd, etc. for kernel managed allocations used by the GPU.
*
*/
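/* TTM destroy callback for a plain amdgpu BO: unmap it, release the GEM object and free it. */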
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
amdgpu_bo_kunmap(bo);
if (bo->tbo.base.import_attach)
drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
drm_gem_object_release(&bo->tbo.base);
amdgpu_bo_unref(&bo->parent);
kvfree(bo);
}
static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
struct amdgpu_bo_user *ubo;
ubo = to_amdgpu_bo_user(bo);
kfree(ubo->metadata);
amdgpu_bo_destroy(tbo);
}
static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
struct amdgpu_bo_vm *vmbo;
bo = shadow_bo->parent;
vmbo = to_amdgpu_bo_vm(bo);
	/* in case amdgpu_device_recover_vram got a NULL bo->parent */
if (!list_empty(&vmbo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
list_del_init(&vmbo->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
amdgpu_bo_destroy(tbo);
}
/**
* amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
* @bo: buffer object to be checked
*
* Uses destroy function associated with the object to determine if this is
* an &amdgpu_bo.
*
* Returns:
* true if the object belongs to &amdgpu_bo, false if not.
*/
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &amdgpu_bo_destroy ||
bo->destroy == &amdgpu_bo_user_destroy ||
bo->destroy == &amdgpu_bo_vm_destroy)
return true;
return false;
}
/**
* amdgpu_bo_placement_from_domain - set buffer's placement
* @abo: &amdgpu_bo buffer object whose placement is to be set
* @domain: requested domain
*
* Sets buffer's placement according to requested domain and the buffer's
* flags.
*/
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
struct ttm_place *places = abo->placements;
u64 flags = abo->flags;
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
if (adev->gmc.mem_partitions && mem_id >= 0) {
places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
/*
* memory partition range lpfn is inclusive start + size - 1
* TTM place lpfn is exclusive start + size
*/
places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
} else {
places[c].fpfn = 0;
places[c].lpfn = 0;
}
places[c].mem_type = TTM_PL_VRAM;
places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_DOORBELL;
places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type =
abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
AMDGPU_PL_PREEMPT : TTM_PL_TT;
places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_CPU) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GDS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GDS;
places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GWS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GWS;
places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_OA) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_OA;
places[c].flags = 0;
c++;
}
if (!c) {
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
places[c].flags = 0;
c++;
}
BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
placement->num_placement = c;
placement->placement = places;
placement->num_busy_placement = c;
placement->busy_placement = places;
}
/**
* amdgpu_bo_create_reserved - create reserved BO for kernel use
*
* @adev: amdgpu device object
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
* @bo_ptr: used to initialize BOs in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use, and returns it still
* reserved.
*
 * Note: A new BO is only created if *bo_ptr points to NULL.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr)
{
struct amdgpu_bo_param bp;
bool free = false;
int r;
if (!size) {
amdgpu_bo_unref(bo_ptr);
return 0;
}
memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = align;
bp.domain = domain;
bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
if (!*bo_ptr) {
r = amdgpu_bo_create(adev, &bp, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r);
return r;
}
free = true;
}
r = amdgpu_bo_reserve(*bo_ptr, false);
if (r) {
dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
goto error_free;
}
r = amdgpu_bo_pin(*bo_ptr, domain);
if (r) {
dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
goto error_unreserve;
}
r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
if (r) {
dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
goto error_unpin;
}
if (gpu_addr)
*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
if (cpu_addr) {
r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
goto error_unpin;
}
}
return 0;
error_unpin:
amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
amdgpu_bo_unreserve(*bo_ptr);
error_free:
if (free)
amdgpu_bo_unref(bo_ptr);
return r;
}
/**
* amdgpu_bo_create_kernel - create BO for kernel use
*
* @adev: amdgpu device object
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
* @bo_ptr: used to initialize BOs in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use.
*
 * Note: A new BO is only created if *bo_ptr points to NULL.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr)
{
int r;
r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
gpu_addr, cpu_addr);
if (r)
return r;
if (*bo_ptr)
amdgpu_bo_unreserve(*bo_ptr);
return 0;
}
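/*
 * Illustrative sketch, not part of the driver: the typical lifecycle of a
 * kernel-owned BO built on the helpers above.  The wrapper name and the
 * one-page GTT allocation are assumptions chosen only for the example.
 */
static int __maybe_unused amdgpu_bo_kernel_bo_example(struct amdgpu_device *adev)
{
	struct amdgpu_bo *bo = NULL;
	void *cpu_addr;
	u64 gpu_addr;
	int r;
	/* Allocate, pin and CPU-map one page of GTT memory in a single call. */
	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &bo,
				    &gpu_addr, &cpu_addr);
	if (r)
		return r;
	/* The CPU writes through cpu_addr; the GPU sees the data at gpu_addr. */
	memset(cpu_addr, 0, PAGE_SIZE);
	/* Unmap, unpin and drop the reference again. */
	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
	return 0;
}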
/**
* amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
*
* @adev: amdgpu device object
* @offset: offset of the BO
* @size: size of the BO
* @bo_ptr: used to initialize BOs in structures
* @cpu_addr: optional CPU address mapping
*
* Creates a kernel BO at a specific offset in VRAM.
*
* Returns:
* 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size,
struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
struct ttm_operation_ctx ctx = { false, false };
unsigned int i;
int r;
offset &= PAGE_MASK;
size = ALIGN(size, PAGE_SIZE);
r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
cpu_addr);
if (r)
return r;
if ((*bo_ptr) == NULL)
return 0;
/*
	 * Remove the original mem node and create a new one at the requested
* position.
*/
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
}
r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
&(*bo_ptr)->tbo.resource, &ctx);
if (r)
goto error;
if (cpu_addr) {
r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
if (r)
goto error;
}
amdgpu_bo_unreserve(*bo_ptr);
return 0;
error:
amdgpu_bo_unreserve(*bo_ptr);
amdgpu_bo_unref(bo_ptr);
return r;
}
/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
* @gpu_addr: pointer to where the BO's GPU memory space address was stored
* @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
 * Unmaps and unpins a BO that was allocated for kernel internal use.
*/
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr)
{
if (*bo == NULL)
return;
WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
if (cpu_addr)
amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(*bo);
}
amdgpu_bo_unref(bo);
if (gpu_addr)
*gpu_addr = 0;
if (cpu_addr)
*cpu_addr = NULL;
}
/* Validate that the requested BO size fits within the requested domain's total memory */
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
unsigned long size, u32 domain)
{
struct ttm_resource_manager *man = NULL;
/*
* If GTT is part of requested domains the check must succeed to
* allow fall back to GTT.
*/
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
if (man && size < man->size)
return true;
else if (!man)
WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
goto fail;
} else if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
if (man && size < man->size)
return true;
goto fail;
}
/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
return true;
fail:
if (man)
DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
man->size);
return false;
}
bool amdgpu_bo_support_uswc(u64 bo_flags)
{
#ifdef CONFIG_X86_32
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
*/
return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
/* Don't try to enable write-combining when it can't work, or things
* may be slow
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758
*/
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
thanks to write-combining
#endif
if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
return false;
#else
/* For architectures that don't support WC memory,
* mask out the WC flag from the BO
*/
if (!drm_arch_can_wc_memory())
return false;
return true;
#endif
}
/**
* amdgpu_bo_create - create an &amdgpu_bo buffer object
* @adev: amdgpu device object
* @bp: parameters to be used for the buffer object
* @bo_ptr: pointer to the buffer object pointer
*
* Creates an &amdgpu_bo buffer object.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
{
struct ttm_operation_ctx ctx = {
.interruptible = (bp->type != ttm_bo_type_kernel),
.no_wait_gpu = bp->no_wait_gpu,
		/* We opt to avoid OOM on system page allocations */
.gfp_retry_mayfail = true,
.allow_res_evict = bp->type != ttm_bo_type_kernel,
.resv = bp->resv
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
int r;
/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
/* GWS and OA don't need any alignment. */
page_align = bp->byte_align;
size <<= PAGE_SHIFT;
} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
/* Both size and alignment must be a multiple of 4. */
page_align = ALIGN(bp->byte_align, 4);
size = ALIGN(size, 4) << PAGE_SHIFT;
} else {
/* Memory should be aligned at least to a page size. */
page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
size = ALIGN(size, PAGE_SIZE);
}
if (!amdgpu_bo_validate_size(adev, size, bp->domain))
return -ENOMEM;
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
*bo_ptr = NULL;
bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
bp->domain;
bo->allowed_domains = bo->preferred_domains;
if (bp->type != ttm_bo_type_kernel &&
!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
bo->flags = bp->flags;
if (adev->gmc.mem_partitions)
/* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
bo->xcp_id = bp->xcp_id_plus1 - 1;
else
/* For GPUs without spatial partitioning */
bo->xcp_id = 0;
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
if (adev->ras_enabled)
bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
AMDGPU_GEM_DOMAIN_GDS))
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
else
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
if (!bp->destroy)
bp->destroy = &amdgpu_bo_destroy;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
&bo->placement, page_align, &ctx, NULL,
bp->resv, bp->destroy);
if (unlikely(r != 0))
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
if (unlikely(r))
goto fail_unreserve;
dma_resv_add_fence(bo->tbo.base.resv, fence,
DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
}
if (!bp->resv)
amdgpu_bo_unreserve(bo);
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
/* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
if (bp->type == ttm_bo_type_device)
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
return 0;
fail_unreserve:
if (!bp->resv)
dma_resv_unlock(bo->tbo.base.resv);
amdgpu_bo_unref(&bo);
return r;
}
/**
* amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
* @adev: amdgpu device object
* @bp: parameters to be used for the buffer object
* @ubo_ptr: pointer to the buffer object pointer
*
 * Creates a BO to be used by a userspace application.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_create_user(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo_user **ubo_ptr)
{
struct amdgpu_bo *bo_ptr;
int r;
bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
bp->destroy = &amdgpu_bo_user_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*ubo_ptr = to_amdgpu_bo_user(bo_ptr);
return r;
}
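/*
 * Illustrative sketch, not part of the driver: filling in &amdgpu_bo_param
 * for a userspace-visible allocation.  The size, domain and flags are
 * assumptions chosen only for the example; bo_ptr_size and destroy are set
 * by amdgpu_bo_create_user() itself.
 */
static int __maybe_unused amdgpu_bo_create_user_example(struct amdgpu_device *adev,
							struct amdgpu_bo_user **ubo)
{
	struct amdgpu_bo_param bp;
	memset(&bp, 0, sizeof(bp));
	bp.size = 2 * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	return amdgpu_bo_create_user(adev, &bp, ubo);
}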
/**
* amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
* @adev: amdgpu device object
* @bp: parameters to be used for the buffer object
* @vmbo_ptr: pointer to the buffer object pointer
*
 * Creates a BO to be used by GPUVM.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo_vm **vmbo_ptr)
{
struct amdgpu_bo *bo_ptr;
int r;
/* bo_ptr_size will be determined by the caller and it depends on
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
return r;
}
/**
* amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
*
* @vmbo: BO that will be inserted into the shadow list
*
 * Inserts a BO into the shadow list.
*/
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
mutex_lock(&adev->shadow_list_lock);
list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
mutex_unlock(&adev->shadow_list_lock);
}
/**
* amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
*
* @shadow: &amdgpu_bo shadow to be restored
* @fence: dma_fence associated with the operation
*
* Copies a buffer object's shadow content back to the object.
* This is used for recovering a buffer from its shadow in case of a gpu
* reset where vram context may be lost.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
uint64_t shadow_addr, parent_addr;
shadow_addr = amdgpu_bo_gpu_offset(shadow);
parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
amdgpu_bo_size(shadow), NULL, fence,
true, false, false);
}
/**
* amdgpu_bo_kmap - map an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be mapped
* @ptr: kernel virtual address to be returned
*
* Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
* amdgpu_bo_kptr() to get the kernel virtual address.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
void *kptr;
long r;
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
kptr = amdgpu_bo_kptr(bo);
if (kptr) {
if (ptr)
*ptr = kptr;
return 0;
}
r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
if (r)
return r;
if (ptr)
*ptr = amdgpu_bo_kptr(bo);
return 0;
}
/**
* amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
* @bo: &amdgpu_bo buffer object
*
* Calls ttm_kmap_obj_virtual() to get the kernel virtual address
*
* Returns:
* the virtual address of a buffer object area.
*/
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
bool is_iomem;
return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}
/**
* amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be unmapped
*
* Unmaps a kernel map set up by amdgpu_bo_kmap().
*/
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
if (bo->kmap.bo)
ttm_bo_kunmap(&bo->kmap);
}
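/*
 * Illustrative sketch, not part of the driver: CPU access to a BO via the
 * kmap helpers.  The reservation around the mapping follows the pattern used
 * elsewhere in this file; the function itself is a hypothetical example.
 */
static int __maybe_unused amdgpu_bo_cpu_access_example(struct amdgpu_bo *bo)
{
	void *ptr;
	int r;
	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	r = amdgpu_bo_kmap(bo, &ptr);
	if (!r) {
		/* ptr is now a kernel virtual mapping of the whole BO */
		memset(ptr, 0, amdgpu_bo_size(bo));
		amdgpu_bo_kunmap(bo);
	}
	amdgpu_bo_unreserve(bo);
	return r;
}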
/**
* amdgpu_bo_ref - reference an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object
*
* References the contained &ttm_buffer_object.
*
* Returns:
* a refcounted pointer to the &amdgpu_bo buffer object.
*/
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
if (bo == NULL)
return NULL;
ttm_bo_get(&bo->tbo);
return bo;
}
/**
* amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object
*
 * Unreferences the contained &ttm_buffer_object and clears the pointer.
*/
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
struct ttm_buffer_object *tbo;
if ((*bo) == NULL)
return;
tbo = &((*bo)->tbo);
ttm_bo_put(tbo);
*bo = NULL;
}
/**
* amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be pinned
* @domain: domain to be pinned to
* @min_offset: the start of requested address range
* @max_offset: the end of requested address range
*
* Pins the buffer object according to requested domain and address range. If
* the memory is unbound gart memory, binds the pages into gart table. Adjusts
* pin_count and pin_size accordingly.
*
* Pinning means to lock pages in memory along with keeping them at a fixed
* offset. It is required when a buffer can not be moved, for example, when
* a display buffer is being scanned out.
*
* Compared with amdgpu_bo_pin(), this function gives more flexibility on
* where to pin a buffer if there are specific restrictions on where a buffer
* must be located.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
/* Check domain to be pinned to against preferred domains */
if (bo->preferred_domains & domain)
domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
if (bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
return -EINVAL;
}
if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.resource->mem_type;
uint32_t mem_flags = bo->tbo.resource->placement;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
if ((mem_type == TTM_PL_VRAM) &&
(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
!(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
return -EINVAL;
ttm_bo_pin(&bo->tbo);
if (max_offset != 0) {
u64 domain_start = amdgpu_ttm_domain_start(adev,
mem_type);
WARN_ON_ONCE(max_offset <
(amdgpu_bo_gpu_offset(bo) - domain_start));
}
return 0;
}
/* This assumes only APU display buffers are pinned with (VRAM|GTT).
* See function amdgpu_display_supported_domains()
*/
domain = amdgpu_bo_get_preferred_domain(adev, domain);
if (bo->tbo.base.import_attach)
dma_buf_pin(bo->tbo.base.import_attach);
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
unsigned int fpfn, lpfn;
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
ttm_bo_pin(&bo->tbo);
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
&adev->visible_pin_size);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
error:
return r;
}
/**
* amdgpu_bo_pin - pin an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be pinned
* @domain: domain to be pinned to
*
* A simple wrapper to amdgpu_bo_pin_restricted().
* Provides a simpler API for buffers that do not have any strict restrictions
* on where a buffer must be located.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
/**
* amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
* @bo: &amdgpu_bo buffer object to be unpinned
*
* Decreases the pin_count, and clears the flags if pin_count reaches 0.
* Changes placement and pin size accordingly.
 */
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
ttm_bo_unpin(&bo->tbo);
if (bo->tbo.pin_count)
return;
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
&adev->visible_pin_size);
} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
}
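/*
 * Illustrative sketch, not part of the driver: pinning a BO while its GPU
 * address must stay fixed (e.g. scanout), then releasing it again.  The BO
 * is assumed to allow VRAM placement; the function is a hypothetical example.
 */
static int __maybe_unused amdgpu_bo_pin_unpin_example(struct amdgpu_bo *bo)
{
	u64 addr;
	int r;
	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (!r) {
		/* the offset is stable until the matching unpin */
		addr = amdgpu_bo_gpu_offset(bo);
		(void)addr;
		amdgpu_bo_unpin(bo);
	}
	amdgpu_bo_unreserve(bo);
	return r;
}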
static const char * const amdgpu_vram_names[] = {
"UNKNOWN",
"GDDR1",
"DDR2",
"GDDR3",
"GDDR4",
"GDDR5",
"HBM",
"DDR3",
"DDR4",
"GDDR6",
"DDR5",
"LPDDR4",
"LPDDR5"
};
/**
* amdgpu_bo_init - initialize memory manager
* @adev: amdgpu device object
*
* Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* On A+A platform, VRAM can be mapped as WB */
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
/* reserve PAT memory space to WC for VRAM */
int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
adev->gmc.aper_size);
if (r) {
DRM_ERROR("Unable to set WC memtype for the aperture base\n");
return r;
}
/* Add an MTRR for the VRAM */
adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
adev->gmc.aper_size);
}
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->gmc.mc_vram_size >> 20,
(unsigned long long)adev->gmc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n",
adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev);
}
/**
* amdgpu_bo_fini - tear down memory manager
* @adev: amdgpu device object
*
* Reverses amdgpu_bo_init() to tear down memory manager.
*/
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
int idx;
amdgpu_ttm_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
arch_phys_wc_del(adev->gmc.vram_mtrr);
arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
drm_dev_exit(idx);
}
}
/**
* amdgpu_bo_set_tiling_flags - set tiling flags
* @bo: &amdgpu_bo buffer object
* @tiling_flags: new flags
*
 * Sets the buffer object's tiling flags to the new value. Used by the GEM ioctl or
* kernel driver to set the tiling flags on a buffer.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_bo_user *ubo;
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
if (adev->family <= AMDGPU_FAMILY_CZ &&
AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
return -EINVAL;
ubo = to_amdgpu_bo_user(bo);
ubo->tiling_flags = tiling_flags;
return 0;
}
/**
* amdgpu_bo_get_tiling_flags - get tiling flags
* @bo: &amdgpu_bo buffer object
* @tiling_flags: returned flags
*
 * Gets the buffer object's tiling flags. Used by the GEM ioctl or kernel driver
 * to query the tiling flags on a buffer.
*/
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
struct amdgpu_bo_user *ubo;
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
dma_resv_assert_held(bo->tbo.base.resv);
ubo = to_amdgpu_bo_user(bo);
if (tiling_flags)
*tiling_flags = ubo->tiling_flags;
}
/**
* amdgpu_bo_set_metadata - set metadata
* @bo: &amdgpu_bo buffer object
* @metadata: new metadata
* @metadata_size: size of the new metadata
* @flags: flags of the new metadata
*
* Sets buffer object's metadata, its size and flags.
* Used via GEM ioctl.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
u32 metadata_size, uint64_t flags)
{
struct amdgpu_bo_user *ubo;
void *buffer;
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
ubo = to_amdgpu_bo_user(bo);
if (!metadata_size) {
if (ubo->metadata_size) {
kfree(ubo->metadata);
ubo->metadata = NULL;
ubo->metadata_size = 0;
}
return 0;
}
if (metadata == NULL)
return -EINVAL;
buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
if (buffer == NULL)
return -ENOMEM;
kfree(ubo->metadata);
ubo->metadata_flags = flags;
ubo->metadata = buffer;
ubo->metadata_size = metadata_size;
return 0;
}
/**
* amdgpu_bo_get_metadata - get metadata
* @bo: &amdgpu_bo buffer object
* @buffer: returned metadata
* @buffer_size: size of the buffer
* @metadata_size: size of the returned metadata
* @flags: flags of the returned metadata
*
* Gets buffer object's metadata, its size and flags. buffer_size shall not be
* less than metadata_size.
* Used via GEM ioctl.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags)
{
struct amdgpu_bo_user *ubo;
if (!buffer && !metadata_size)
return -EINVAL;
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
ubo = to_amdgpu_bo_user(bo);
if (metadata_size)
*metadata_size = ubo->metadata_size;
if (buffer) {
if (buffer_size < ubo->metadata_size)
return -EINVAL;
if (ubo->metadata_size)
memcpy(buffer, ubo->metadata, ubo->metadata_size);
}
if (flags)
*flags = ubo->metadata_flags;
return 0;
}
/**
* amdgpu_bo_move_notify - notification about a memory move
* @bo: pointer to a buffer object
* @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
*
* Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
* bookkeeping.
* TTM driver callback which is called when ttm moves a buffer.
*/
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
abo = ttm_to_amdgpu_bo(bo);
amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo);
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
bo->resource->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
/* update statistics */
if (!new_mem)
return;
/* move_notify is called before move happens */
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats)
{
uint64_t size = amdgpu_bo_size(bo);
unsigned int domain;
/* Abort if the BO doesn't currently have a backing store */
if (!bo->tbo.resource)
return;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
stats->vram += size;
if (amdgpu_bo_in_cpu_visible_vram(bo))
stats->visible_vram += size;
break;
case AMDGPU_GEM_DOMAIN_GTT:
stats->gtt += size;
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
stats->cpu += size;
break;
}
if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
stats->requested_vram += size;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
stats->requested_visible_vram += size;
if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
stats->evicted_vram += size;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
stats->evicted_visible_vram += size;
}
} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
stats->requested_gtt += size;
}
}
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
*
* Wipes VRAM buffers whose contents should not be leaked before the
* memory is released.
*/
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct dma_fence *fence = NULL;
struct amdgpu_bo *abo;
int r;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
abo = ttm_to_amdgpu_bo(bo);
if (abo->kfd_bo)
amdgpu_amdkfd_release_notify(abo);
	/* We only remove the fence if the resv has been individualized. */
WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
&& bo->base.resv != &bo->base._resv);
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
return;
if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
return;
r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
if (!WARN_ON(r)) {
amdgpu_bo_fence(abo, fence, false);
dma_fence_put(fence);
}
dma_resv_unlock(bo->base.resv);
}
/**
* amdgpu_bo_fault_reserve_notify - notification about a memory fault
* @bo: pointer to a buffer object
*
* Notifies the driver we are taking a fault on this BO and have reserved it,
* also performs bookkeeping.
* TTM driver callback for dealing with vm faults.
*
* Returns:
 * 0 for success or a VM_FAULT_* code on failure.
*/
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
int r;
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
if (amdgpu_bo_in_cpu_visible_vram(abo))
return 0;
/* Can't move a pinned BO to visible VRAM */
if (abo->tbo.pin_count > 0)
return VM_FAULT_SIGBUS;
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
abo->placement.num_busy_placement = 1;
abo->placement.busy_placement = &abo->placements[1];
r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
return VM_FAULT_NOPAGE;
else if (unlikely(r))
return VM_FAULT_SIGBUS;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
!amdgpu_bo_in_cpu_visible_vram(abo))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
/**
* amdgpu_bo_fence - add fence to buffer object
*
* @bo: buffer object in question
* @fence: fence to add
* @shared: true if fence should be added shared
*
*/
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct dma_resv *resv = bo->tbo.base.resv;
int r;
r = dma_resv_reserve_fences(resv, 1);
if (r) {
/* As last resort on OOM we block for the fence */
dma_fence_wait(fence, false);
return;
}
dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
DMA_RESV_USAGE_WRITE);
}
/**
* amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
*
* @adev: amdgpu device pointer
* @resv: reservation object to sync to
* @sync_mode: synchronization mode
* @owner: fence owner
* @intr: Whether the wait is interruptible
*
* Extract the fences from the reservation object and waits for them to finish.
*
* Returns:
* 0 on success, errno otherwise.
*/
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode, void *owner,
bool intr)
{
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
return r;
}
/**
* amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
* @bo: buffer object to wait for
* @owner: fence owner
* @intr: Whether the wait is interruptible
*
* Wrapper to wait for fences in a BO.
* Returns:
* 0 on success, errno otherwise.
*/
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
AMDGPU_SYNC_NE_OWNER, owner, intr);
}
/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
* Note: object should either be pinned or reserved when calling this
 * function; it might be useful to add a check for this for debugging.
*
* Returns:
* current GPU offset of the object.
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return amdgpu_bo_gpu_offset_no_check(bo);
}
/**
* amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
* Returns:
* current GPU offset of the object without raising warnings.
*/
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
offset = (bo->tbo.resource->start << PAGE_SHIFT) +
amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}
/**
* amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
*
* Returns:
* Which of the allowed domains is preferred for allocating the BO.
*/
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain)
{
if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
domain = AMDGPU_GEM_DOMAIN_GTT;
}
return domain;
}
#if defined(CONFIG_DEBUG_FS)
#define amdgpu_bo_print_flag(m, bo, flag) \
do { \
if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
seq_printf((m), " " #flag); \
} \
} while (0)
/**
* amdgpu_bo_print_info - print BO info in debugfs file
*
* @id: Index or Id of the BO
* @bo: Requested BO for printing info
* @m: debugfs file
*
* Print BO information in debugfs file
*
* Returns:
* Size of the BO in bytes.
*/
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
{
struct dma_buf_attachment *attachment;
struct dma_buf *dma_buf;
const char *placement;
unsigned int pin_count;
u64 size;
if (dma_resv_trylock(bo->tbo.base.resv)) {
unsigned int domain;
domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
if (amdgpu_bo_in_cpu_visible_vram(bo))
placement = "VRAM VISIBLE";
else
placement = "VRAM";
break;
case AMDGPU_GEM_DOMAIN_GTT:
placement = "GTT";
break;
case AMDGPU_GEM_DOMAIN_CPU:
default:
placement = "CPU";
break;
}
dma_resv_unlock(bo->tbo.base.resv);
} else {
placement = "UNKNOWN";
}
size = amdgpu_bo_size(bo);
seq_printf(m, "\t\t0x%08x: %12lld byte %s",
id, size, placement);
pin_count = READ_ONCE(bo->tbo.pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
else if (dma_buf)
seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);
amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
seq_puts(m, "\n");
return size;
}
#endif
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
/**
* amdgpu_mm_rdoorbell - read a doorbell dword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
*
* Returns the value in the doorbell aperture at the
* requested doorbell index (CIK).
*/
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_kernel_doorbells)
return readl(adev->doorbell.cpu_addr + index);
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
/**
* amdgpu_mm_wdoorbell - write a doorbell dword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
* requested doorbell index (CIK).
*/
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_kernel_doorbells)
writel(v, adev->doorbell.cpu_addr + index);
else
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
/**
* amdgpu_mm_rdoorbell64 - read a doorbell Qword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
*
* Returns the value in the doorbell aperture at the
* requested doorbell index (VEGA10+).
*/
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_kernel_doorbells)
return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
return 0;
}
/**
* amdgpu_mm_wdoorbell64 - write a doorbell Qword
*
* @adev: amdgpu_device pointer
* @index: doorbell index
* @v: value to write
*
* Writes @v to the doorbell aperture at the
* requested doorbell index (VEGA10+).
*/
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_kernel_doorbells)
atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
else
DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
}
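/*
 * Illustrative sketch, not part of the driver: ringing a doorbell by writing
 * the new ring write pointer.  The index and wptr values are assumptions
 * made only for the example.
 */
static void __maybe_unused amdgpu_doorbell_ring_example(struct amdgpu_device *adev,
							u32 index, u64 wptr)
{
	/* 64-bit doorbells (VEGA10+) take the full write pointer ... */
	amdgpu_mm_wdoorbell64(adev, index, wptr);
	/* ... while older parts would use amdgpu_mm_wdoorbell() with a 32-bit value. */
}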
/**
* amdgpu_doorbell_index_on_bar - Find doorbell's absolute offset in BAR
*
* @adev: amdgpu_device pointer
* @db_bo: doorbell object's bo
 * @doorbell_index: doorbell relative index in this doorbell object
*
* returns doorbell's absolute index in BAR
*/
uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev,
struct amdgpu_bo *db_bo,
uint32_t doorbell_index)
{
int db_bo_offset;
db_bo_offset = amdgpu_bo_gpu_offset_no_check(db_bo);
	/* the index counts 32-bit dwords, but each doorbell is 64 bits wide, so *2 */
return db_bo_offset / sizeof(u32) + doorbell_index * 2;
}
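/*
 * Illustrative sketch, not part of the driver: a doorbell BO mapped 0x1000
 * bytes into the BAR keeps its first 64-bit doorbell at dword index
 * 0x1000 / 4 = 0x400, its second at 0x402, and so on, which is exactly
 * what amdgpu_doorbell_index_on_bar() computes, e.g.:
 *
 *	index = amdgpu_doorbell_index_on_bar(adev, db_bo, 1);
 *	amdgpu_mm_wdoorbell64(adev, index, wptr);
 */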
/**
* amdgpu_doorbell_create_kernel_doorbells - Create kernel doorbells for graphics
*
* @adev: amdgpu_device pointer
*
 * Creates the doorbells used by the kernel graphics driver.
* returns 0 on success, error otherwise.
*/
int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
{
int r;
int size;
/* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */
size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
/* Allocate an extra page for MES kernel usages (ring test) */
adev->mes.db_start_dw_offset = size / sizeof(u32);
size += PAGE_SIZE;
r = amdgpu_bo_create_kernel(adev,
size,
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_DOORBELL,
&adev->doorbell.kernel_doorbells,
NULL,
(void **)&adev->doorbell.cpu_addr);
if (r) {
DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r);
return r;
}
adev->doorbell.num_kernel_doorbells = size / sizeof(u32);
return 0;
}
/*
 * GPU doorbell aperture helper functions.
*/
/**
* amdgpu_doorbell_init - Init doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
adev->doorbell.size = 0;
adev->doorbell.num_kernel_doorbells = 0;
return 0;
}
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
amdgpu_asic_init_doorbell_index(adev);
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_kernel_doorbells =
min_t(u32, adev->doorbell.size / sizeof(u32),
adev->doorbell_index.max_assignment + 1);
if (adev->doorbell.num_kernel_doorbells == 0)
return -EINVAL;
/*
	 * For Vega, reserve and map two pages on the doorbell BAR since the SDMA
	 * paging queue doorbells use the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with the paging queue enabled,
	 * num_kernel_doorbells must grow by one extra page (0x400 dwords).
*/
if (adev->asic_type >= CHIP_VEGA10)
adev->doorbell.num_kernel_doorbells += 0x400;
return 0;
}
/**
* amdgpu_doorbell_fini - Tear down doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Tear down doorbell driver information (CIK)
*/
void amdgpu_doorbell_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->doorbell.kernel_doorbells,
NULL,
(void **)&adev->doorbell.cpu_addr);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c |
/*
* Copyright 2007-11 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <linux/pci.h>
#include <acpi/video.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "atombios_dp.h"
#include <linux/backlight.h>
#include "bif/bif_4_1_d.h"
u8
amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
{
u8 backlight_level;
u32 bios_2_scratch;
bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
return backlight_level;
}
void
amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
u8 backlight_level)
{
u32 bios_2_scratch;
bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
ATOM_S2_CURRENT_BL_LEVEL_MASK);
WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
}
u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
struct amdgpu_device *adev = drm_to_adev(dev);
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return 0;
return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
}
void
amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
u8 level)
{
struct drm_encoder *encoder = &amdgpu_encoder->base;
struct drm_device *dev = amdgpu_encoder->base.dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder_atom_dig *dig;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
amdgpu_encoder->enc_priv) {
dig = amdgpu_encoder->enc_priv;
dig->backlight_level = level;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, dig->backlight_level);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
}
break;
default:
break;
}
}
}
static u8 amdgpu_atombios_encoder_backlight_level(struct backlight_device *bd)
{
u8 level;
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
else if (bd->props.brightness > AMDGPU_MAX_BL_LEVEL)
level = AMDGPU_MAX_BL_LEVEL;
else
level = bd->props.brightness;
return level;
}
static int amdgpu_atombios_encoder_update_backlight_status(struct backlight_device *bd)
{
struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder,
amdgpu_atombios_encoder_backlight_level(bd));
return 0;
}
static int
amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd)
{
struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
struct drm_device *dev = amdgpu_encoder->base.dev;
struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
}
static const struct backlight_ops amdgpu_atombios_encoder_backlight_ops = {
.get_brightness = amdgpu_atombios_encoder_get_backlight_brightness,
.update_status = amdgpu_atombios_encoder_update_backlight_status,
};
void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encoder,
struct drm_connector *drm_connector)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd;
struct backlight_properties props;
struct amdgpu_backlight_privdata *pdata;
struct amdgpu_encoder_atom_dig *dig;
char bl_name[16];
/* Mac laptops with multiple GPUs use the gmux driver for backlight
* so don't register a backlight device
*/
if ((adev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
(adev->pdev->device == 0x6741))
return;
if (!amdgpu_encoder->enc_priv)
return;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
goto register_acpi_backlight;
if (!acpi_video_backlight_use_native()) {
drm_info(dev, "Skipping amdgpu atom DIG backlight registration\n");
goto register_acpi_backlight;
}
pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
goto error;
}
memset(&props, 0, sizeof(props));
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"amdgpu_bl%d", dev->primary->index);
bd = backlight_device_register(bl_name, drm_connector->kdev,
pdata, &amdgpu_atombios_encoder_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
goto error;
}
pdata->encoder = amdgpu_encoder;
dig = amdgpu_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
DRM_INFO("amdgpu atom DIG backlight initialized\n");
return;
error:
kfree(pdata);
return;
register_acpi_backlight:
/* Try registering an ACPI video backlight device instead. */
acpi_video_register_backlight();
return;
}
void
amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd = NULL;
struct amdgpu_encoder_atom_dig *dig;
if (!amdgpu_encoder->enc_priv)
return;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
dig = amdgpu_encoder->enc_priv;
bd = dig->bl_dev;
dig->bl_dev = NULL;
if (bd) {
struct amdgpu_legacy_backlight_privdata *pdata;
pdata = bl_get_data(bd);
backlight_device_unregister(bd);
kfree(pdata);
DRM_INFO("amdgpu atom LVDS backlight unloaded\n");
}
}
bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return true;
default:
return false;
}
}
bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
/* set the active encoder to connector routing */
amdgpu_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
/* vertical FP must be at least 1 */
if (mode->crtc_vsync_start == mode->crtc_vdisplay)
adjusted_mode->crtc_vsync_start++;
/* get the native mode for scaling */
if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_panel_mode_fixup(encoder, adjusted_mode);
else if (amdgpu_encoder->rmx_type != RMX_OFF)
amdgpu_panel_mode_fixup(encoder, adjusted_mode);
if ((amdgpu_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
amdgpu_atombios_dp_set_link_config(connector, adjusted_mode);
}
return true;
}
static void
amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
memset(&args, 0, sizeof(args));
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
break;
}
args.ucAction = action;
args.ucDacStandard = ATOM_DAC1_PS2;
args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
static u8 amdgpu_atombios_encoder_get_bpc(struct drm_encoder *encoder)
{
int bpc = 8;
if (encoder->crtc) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
bpc = amdgpu_crtc->bpc;
}
switch (bpc) {
case 0:
return PANEL_BPC_UNDEFINE;
case 6:
return PANEL_6BIT_PER_COLOR;
case 8:
default:
return PANEL_8BIT_PER_COLOR;
case 10:
return PANEL_10BIT_PER_COLOR;
case 12:
return PANEL_12BIT_PER_COLOR;
case 16:
return PANEL_16BIT_PER_COLOR;
}
}
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
};
static void
amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
uint8_t frev, crev;
memset(&args, 0, sizeof(args));
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
return;
switch (frev) {
case 1:
switch (crev) {
case 1:
/* R4xx, R5xx */
args.ext_tmds.sXTmdsEncoder.ucEnable = action;
if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
break;
case 2:
/* RS600/690/740 */
args.dvo.sDVOEncoder.ucAction = action;
args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
/* DFP1, CRT1, TV1 depending on the type of port */
args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
break;
case 3:
/* R6xx */
args.dvo_v3.ucAction = action;
args.dvo_v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
args.dvo_v3.ucDVOConfig = 0; /* XXX */
break;
case 4:
/* DCE8 */
args.dvo_v4.ucAction = action;
args.dvo_v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
args.dvo_v4.ucDVOConfig = 0; /* XXX */
args.dvo_v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
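/* Pick the ATOM encoder mode (DP, HDMI, DVI, LVDS, CRT or TV) based on the
 * connector type, the sink's capabilities and the amdgpu_audio parameter.
 */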
int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *dig_connector;
/* dp bridges are always DP */
if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
return ATOM_ENCODER_MODE_DP;
/* DVO is always DVO */
if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
(amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
return ATOM_ENCODER_MODE_DVO;
connector = amdgpu_get_connector_for_encoder(encoder);
/* if we don't have an active device yet, just use one of
* the connectors tied to the encoder.
*/
if (!connector)
connector = amdgpu_get_connector_for_encoder_init(encoder);
amdgpu_connector = to_amdgpu_connector(connector);
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (amdgpu_audio != 0) {
if (amdgpu_connector->use_digital &&
(amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (amdgpu_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
} else if (amdgpu_connector->use_digital) {
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_CRT;
}
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_DVI;
}
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = amdgpu_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return ATOM_ENCODER_MODE_DP;
} else if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_DVI;
}
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
}
}
/*
* DIG Encoder/Transmitter Setup
*
* DCE 6.0
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
* DIG1 drives UNIPHY0 link A, A+B
* DIG2 drives UNIPHY0 link B
* DIG3 drives UNIPHY1 link A, A+B
* DIG4 drives UNIPHY1 link B
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
* crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
* crtc1 -> dig1 -> UNIPHY0 link B -> DP
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
union dig_encoder_control {
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
DIG_ENCODER_CONTROL_PARAMETERS_V5 v5;
};
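/* Configure the DIG encoder block via the DIGxEncoderControl command table:
 * encoder mode, lane count, DP link rate and panel mode, per table revision.
 */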
void
amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int hpd_id = AMDGPU_HPD_NONE;
if (connector) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector =
amdgpu_connector->con_priv;
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = amdgpu_connector->hpd.hpd;
}
/* no dig encoder assigned */
if (dig->dig_encoder == -1)
return;
memset(&args, 0, sizeof(args));
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
return;
switch (frev) {
case 1:
switch (crev) {
case 1:
args.v1.ucAction = action;
args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v3.ucPanelMode = panel_mode;
else
args.v1.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
args.v1.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
break;
case 2:
case 3:
args.v3.ucAction = action;
args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v3.ucPanelMode = panel_mode;
else
args.v3.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
args.v3.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
break;
case 4:
args.v4.ucAction = action;
args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v4.ucPanelMode = panel_mode;
else
args.v4.ucEncoderMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
args.v4.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v4.ucLaneNum = 8;
else
args.v4.ucLaneNum = 4;
if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
if (dp_clock == 540000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
else if (dp_clock == 324000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
else if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
}
args.v4.acConfig.ucDigSel = dig->dig_encoder;
args.v4.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
if (hpd_id == AMDGPU_HPD_NONE)
args.v4.ucHPD_ID = 0;
else
args.v4.ucHPD_ID = hpd_id + 1;
break;
case 5:
switch (action) {
case ATOM_ENCODER_CMD_SETUP_PANEL_MODE:
args.v5.asDPPanelModeParam.ucAction = action;
args.v5.asDPPanelModeParam.ucPanelMode = panel_mode;
args.v5.asDPPanelModeParam.ucDigId = dig->dig_encoder;
break;
case ATOM_ENCODER_CMD_STREAM_SETUP:
args.v5.asStreamParam.ucAction = action;
args.v5.asStreamParam.ucDigId = dig->dig_encoder;
args.v5.asStreamParam.ucDigMode =
amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v5.asStreamParam.ucDigMode))
args.v5.asStreamParam.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder,
amdgpu_encoder->pixel_clock))
args.v5.asStreamParam.ucLaneNum = 8;
else
args.v5.asStreamParam.ucLaneNum = 4;
args.v5.asStreamParam.ulPixelClock =
cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
args.v5.asStreamParam.ucBitPerColor =
amdgpu_atombios_encoder_get_bpc(encoder);
args.v5.asStreamParam.ucLinkRateIn270Mhz = dp_clock / 27000;
break;
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_START:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4:
case ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE:
case ATOM_ENCODER_CMD_DP_VIDEO_OFF:
case ATOM_ENCODER_CMD_DP_VIDEO_ON:
args.v5.asCmdParam.ucAction = action;
args.v5.asCmdParam.ucDigId = dig->dig_encoder;
break;
default:
DRM_ERROR("Unsupported action 0x%x\n", action);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 v6;
};
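/* Configure the UNIPHY/LVTMA transmitter (PHY) via the transmitter control
 * table: link/lane selection, PLL or external reference clock source,
 * coherent mode and HPD pin, per table revision.
 */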
void
amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
int dig_encoder = dig->dig_encoder;
int hpd_id = AMDGPU_HPD_NONE;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
connector = amdgpu_get_connector_for_encoder_init(encoder);
/* just needed to avoid bailing in the encoder check. the encoder
* isn't used for init
*/
dig_encoder = 0;
} else
connector = amdgpu_get_connector_for_encoder(encoder);
if (connector) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector =
amdgpu_connector->con_priv;
hpd_id = amdgpu_connector->hpd.hpd;
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
}
if (encoder->crtc) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
pll_id = amdgpu_crtc->pll_id;
}
/* no dig encoder assigned */
if (dig_encoder == -1)
return;
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)))
is_dp = true;
memset(&args, 0, sizeof(args));
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
break;
}
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
return;
switch (frev) {
case 1:
switch (crev) {
case 1:
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v1.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
}
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
if (dig_encoder)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
if (is_dp)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
break;
case 2:
args.v2.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v2.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v2.asMode.ucLaneSel = lane_num;
args.v2.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v2.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
else
args.v2.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
}
args.v2.acConfig.ucEncoderSel = dig_encoder;
if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v2.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v2.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v2.acConfig.ucTransmitterSel = 2;
break;
}
if (is_dp) {
args.v2.acConfig.fCoherentMode = 1;
args.v2.acConfig.fDPConnector = 1;
} else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v2.acConfig.fDualLinkConnector = 1;
}
break;
case 3:
args.v3.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v3.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v3.asMode.ucLaneSel = lane_num;
args.v3.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v3.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
else
args.v3.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
}
if (is_dp)
args.v3.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
if (dig->linkb)
args.v3.acConfig.ucLinkSel = 1;
if (dig_encoder & 1)
args.v3.acConfig.ucEncoderSel = 1;
/* Select the PLL for the PHY
* DP PHY should be clocked from external src if there is
* one.
*/
/* On DCE4, if there is an external clock, it generates the DP ref clock */
if (is_dp && adev->clock.dp_extclk)
args.v3.acConfig.ucRefClkSource = 2; /* external src */
else
args.v3.acConfig.ucRefClkSource = pll_id;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v3.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v3.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v3.acConfig.ucTransmitterSel = 2;
break;
}
if (is_dp)
args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v3.acConfig.fCoherentMode = 1;
if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v3.acConfig.fDualLinkConnector = 1;
}
break;
case 4:
args.v4.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v4.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v4.asMode.ucLaneSel = lane_num;
args.v4.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v4.usPixelClock = cpu_to_le16((amdgpu_encoder->pixel_clock / 2) / 10);
else
args.v4.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
}
if (is_dp)
args.v4.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v4.ucLaneNum = 8;
else
args.v4.ucLaneNum = 4;
if (dig->linkb)
args.v4.acConfig.ucLinkSel = 1;
if (dig_encoder & 1)
args.v4.acConfig.ucEncoderSel = 1;
/* Select the PLL for the PHY
* DP PHY should be clocked from external src if there is
* one.
*/
/* On DCE5 DCPLL usually generates the DP ref clock */
if (is_dp) {
if (adev->clock.dp_extclk)
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
else
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
} else
args.v4.acConfig.ucRefClkSource = pll_id;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v4.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v4.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v4.acConfig.ucTransmitterSel = 2;
break;
}
if (is_dp)
args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v4.acConfig.fCoherentMode = 1;
if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v4.acConfig.fDualLinkConnector = 1;
}
break;
case 5:
args.v5.ucAction = action;
if (is_dp)
args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
else
args.v5.usSymClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
break;
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
args.v5.ucConnObjId = connector_object_id;
args.v5.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (is_dp && adev->clock.dp_extclk)
args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
else
args.v5.asConfig.ucPhyClkSrcId = pll_id;
if (is_dp)
args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v5.asConfig.ucCoherentMode = 1;
}
if (hpd_id == AMDGPU_HPD_NONE)
args.v5.asConfig.ucHPDSel = 0;
else
args.v5.asConfig.ucHPDSel = hpd_id + 1;
args.v5.ucDigEncoderSel = 1 << dig_encoder;
args.v5.ucDPLaneSet = lane_set;
break;
case 6:
args.v6.ucAction = action;
if (is_dp)
args.v6.ulSymClock = cpu_to_le32(dp_clock / 10);
else
args.v6.ulSymClock = cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYB;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYA;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYD;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYC;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYF;
else
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYE;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYG;
break;
}
if (is_dp)
args.v6.ucLaneNum = dp_lane_count;
else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v6.ucLaneNum = 8;
else
args.v6.ucLaneNum = 4;
args.v6.ucConnObjId = connector_object_id;
if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH)
args.v6.ucDPLaneSet = lane_set;
else
args.v6.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (hpd_id == AMDGPU_HPD_NONE)
args.v6.ucHPDSel = 0;
else
args.v6.ucHPDSel = hpd_id + 1;
args.v6.ucDigEncoderSel = 1 << dig_encoder;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
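/* Power the eDP panel on or off through the UNIPHYTransmitterControl table.
 * On power-on, poll HPD for up to 300 ms to give the panel time to come up.
 */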
bool
amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
int action)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_device *dev = amdgpu_connector->base.dev;
struct amdgpu_device *adev = drm_to_adev(dev);
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
goto done;
if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
(action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
goto done;
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
goto done;
memset(&args, 0, sizeof(args));
args.v1.ucAction = action;
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
int i;
for (i = 0; i < 300; i++) {
if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd))
return true;
mdelay(1);
}
return false;
}
done:
return true;
}
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
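/* Program an external encoder (e.g. a DP bridge) through the
 * ExternalEncoderControl command table.
 */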
static void
amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
struct drm_encoder *ext_encoder,
int action)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector;
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
u8 frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
u32 ext_enum = (ext_amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
connector = amdgpu_get_connector_for_encoder_init(encoder);
else
connector = amdgpu_get_connector_for_encoder(encoder);
if (connector) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector =
amdgpu_connector->con_priv;
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(amdgpu_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
}
memset(&args, 0, sizeof(args));
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
return;
switch (frev) {
case 1:
/* no params on frev 1 */
break;
case 2:
switch (crev) {
case 1:
case 2:
args.v1.sDigEncoder.ucAction = action;
args.v1.sDigEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
args.v1.sDigEncoder.ucEncoderMode =
amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
if (dp_clock == 270000)
args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
} else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v1.sDigEncoder.ucLaneNum = 8;
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
case 3:
args.v3.sExtEncoder.ucAction = action;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
else
args.v3.sExtEncoder.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
args.v3.sExtEncoder.ucEncoderMode =
amdgpu_atombios_encoder_get_encoder_mode(encoder);
if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
if (dp_clock == 270000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
} else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
args.v3.sExtEncoder.ucLaneNum = 8;
else
args.v3.sExtEncoder.ucLaneNum = 4;
switch (ext_enum) {
case GRAPH_OBJECT_ENUM_ID1:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
break;
case GRAPH_OBJECT_ENUM_ID2:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
break;
case GRAPH_OBJECT_ENUM_ID3:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
break;
}
args.v3.sExtEncoder.ucBitPerColor = amdgpu_atombios_encoder_get_bpc(encoder);
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
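/* Full enable/disable sequence for a DIG path: panel mode and encoder setup,
 * eDP panel power, transmitter enable/disable, DP link training and
 * backlight handling.
 */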
static void
amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = NULL;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = NULL;
if (connector) {
amdgpu_connector = to_amdgpu_connector(connector);
amdgpu_dig_connector = amdgpu_connector->con_priv;
}
if (action == ATOM_ENABLE) {
if (!connector)
dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
else
dig->panel_mode = amdgpu_atombios_dp_get_panel_mode(encoder, connector);
/* setup and enable the encoder */
amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_SETUP, 0);
amdgpu_atombios_encoder_setup_dig_encoder(encoder,
ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
dig->panel_mode);
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
amdgpu_atombios_encoder_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
amdgpu_dig_connector->edp_on = true;
}
}
/* enable the transmitter */
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_ENABLE,
0, 0);
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
connector) {
/* DP_SET_POWER_D0 is set in amdgpu_atombios_dp_link_train */
amdgpu_atombios_dp_link_train(encoder, connector);
amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
} else {
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
connector)
amdgpu_atombios_encoder_setup_dig_encoder(encoder,
ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_DISABLE);
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
connector)
amdgpu_atombios_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
/* disable the transmitter */
amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(encoder)) &&
connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
amdgpu_atombios_encoder_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
amdgpu_dig_connector->edp_on = false;
}
}
}
}
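/* DPMS entry point: dispatch to the DIG, DVO or DAC setup helpers above. */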
void
amdgpu_atombios_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
amdgpu_encoder->encoder_id, mode, amdgpu_encoder->devices,
amdgpu_encoder->active_device);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_atombios_encoder_setup_dig(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
amdgpu_atombios_encoder_setup_dig(encoder, ATOM_DISABLE);
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
amdgpu_atombios_encoder_setup_dvo(encoder, ATOM_DISABLE);
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_atombios_encoder_setup_dac(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
amdgpu_atombios_encoder_setup_dac(encoder, ATOM_DISABLE);
break;
}
break;
default:
return;
}
}
union crtc_source_param {
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
SELECT_CRTC_SOURCE_PARAMETERS_V3 v3;
};
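/* Tell the VBIOS which CRTC feeds this encoder via the SelectCRTC_Source
 * table; newer revisions also carry the encoder ID, encode mode and bpc.
 */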
void
amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
union crtc_source_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct amdgpu_encoder_atom_dig *dig;
memset(&args, 0, sizeof(args));
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
return;
switch (frev) {
case 1:
switch (crev) {
case 1:
default:
args.v1.ucCRTC = amdgpu_crtc->crtc_id;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
break;
}
break;
case 2:
args.v2.ucCRTC = amdgpu_crtc->crtc_id;
if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
} else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
} else {
args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
}
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = amdgpu_encoder->enc_priv;
switch (dig->dig_encoder) {
case 0:
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case 1:
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case 2:
args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
break;
case 3:
args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
break;
case 4:
args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
break;
case 5:
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
break;
case 6:
args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
break;
}
break;
case 3:
args.v3.ucCRTC = amdgpu_crtc->crtc_id;
if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
} else if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
} else {
args.v2.ucEncodeMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
}
args.v3.ucDstBpc = amdgpu_atombios_encoder_get_bpc(encoder);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = amdgpu_encoder->enc_priv;
switch (dig->dig_encoder) {
case 0:
args.v3.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case 1:
args.v3.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case 2:
args.v3.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
break;
case 3:
args.v3.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
break;
case 4:
args.v3.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
break;
case 5:
args.v3.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
break;
case 6:
args.v3.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v3.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v3.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (amdgpu_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v3.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v3.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
break;
}
break;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
/* This only needs to be called once at startup */
void
amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
amdgpu_atombios_encoder_setup_dig_transmitter(encoder, ATOM_TRANSMITTER_ACTION_INIT,
0, 0);
break;
}
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
}
}
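/* Kick off DAC load detection through the DAC_LoadDetection table for
 * CRT/TV/CV devices; the result is reported in the BIOS scratch registers.
 */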
static bool
amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
ATOM_DEVICE_CV_SUPPORT |
ATOM_DEVICE_CRT_SUPPORT)) {
DAC_LOAD_DETECTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
uint8_t frev, crev;
memset(&args, 0, sizeof(args));
if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
return false;
args.sDacload.ucMisc = 0;
if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
(amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
args.sDacload.ucDacType = ATOM_DAC_A;
else
args.sDacload.ucDacType = ATOM_DAC_B;
if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
else if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
else if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
} else if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
return true;
} else
return false;
}
enum drm_connector_status
amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
uint32_t bios_0_scratch;
if (!amdgpu_atombios_encoder_dac_load_detect(encoder, connector)) {
DRM_DEBUG_KMS("detect returned false \n");
return connector_status_unknown;
}
bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices);
if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
enum drm_connector_status
amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
u32 bios_0_scratch;
if (!ext_encoder)
return connector_status_unknown;
if ((amdgpu_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
return connector_status_unknown;
/* load detect on the dp bridge */
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, amdgpu_encoder->devices);
if (amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (amdgpu_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (amdgpu_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
void
amdgpu_atombios_encoder_setup_ext_encoder_ddc(struct drm_encoder *encoder)
{
struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
if (ext_encoder)
/* ddc_setup on the dp bridge */
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
}
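/* Mirror the connector's connected/disconnected state into the BIOS scratch
 * registers so the VBIOS and driver agree on which devices are active.
 */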
void
amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector =
to_amdgpu_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch;
bios_0_scratch = RREG32(mmBIOS_SCRATCH_0);
bios_3_scratch = RREG32(mmBIOS_SCRATCH_3);
bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
if ((amdgpu_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("LCD1 connected\n");
bios_0_scratch |= ATOM_S0_LCD1;
bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
} else {
DRM_DEBUG_KMS("LCD1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_LCD1;
bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("CRT1 connected\n");
bios_0_scratch |= ATOM_S0_CRT1_COLOR;
bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
} else {
DRM_DEBUG_KMS("CRT1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("CRT2 connected\n");
bios_0_scratch |= ATOM_S0_CRT2_COLOR;
bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
} else {
DRM_DEBUG_KMS("CRT2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP1 connected\n");
bios_0_scratch |= ATOM_S0_DFP1;
bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
} else {
DRM_DEBUG_KMS("DFP1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP1;
bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP2 connected\n");
bios_0_scratch |= ATOM_S0_DFP2;
bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
} else {
DRM_DEBUG_KMS("DFP2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP2;
bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP3 connected\n");
bios_0_scratch |= ATOM_S0_DFP3;
bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
} else {
DRM_DEBUG_KMS("DFP3 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP3;
bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP4 connected\n");
bios_0_scratch |= ATOM_S0_DFP4;
bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
} else {
DRM_DEBUG_KMS("DFP4 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP4;
bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP5 connected\n");
bios_0_scratch |= ATOM_S0_DFP5;
bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
} else {
DRM_DEBUG_KMS("DFP5 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP5;
bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
}
}
if ((amdgpu_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
(amdgpu_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP6 connected\n");
bios_0_scratch |= ATOM_S0_DFP6;
bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
} else {
DRM_DEBUG_KMS("DFP6 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP6;
bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
}
}
WREG32(mmBIOS_SCRATCH_0, bios_0_scratch);
WREG32(mmBIOS_SCRATCH_3, bios_3_scratch);
WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
}
union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
};
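/* Parse the LVDS_Info data table: build the panel's native mode, power
 * sequencing delay and misc flags, and walk the LCD patch records
 * (fake EDID, panel resolution overrides).
 */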
struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
uint16_t data_offset, misc;
union lvds_info *lvds_info;
uint8_t frev, crev;
struct amdgpu_encoder_atom_dig *lvds = NULL;
int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
lvds_info =
(union lvds_info *)(mode_info->atom_context->bios + data_offset);
lvds =
kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
if (!lvds)
return NULL;
lvds->native_mode.clock =
le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
lvds->native_mode.hdisplay =
le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
lvds->native_mode.vdisplay =
le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
lvds->panel_pwr_delay =
le16_to_cpu(lvds_info->info.usOffDelayInMs);
lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;
misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
if (misc & ATOM_VSYNC_POLARITY)
lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
if (misc & ATOM_HSYNC_POLARITY)
lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
if (misc & ATOM_COMPOSITESYNC)
lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
if (misc & ATOM_INTERLACE)
lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
if (misc & ATOM_DOUBLE_CLOCK_MODE)
lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
/* set crtc values */
drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
lvds->lcd_ss_id = lvds_info->info.ucSS_Id;
encoder->native_mode = lvds->native_mode;
lvds->linkb = (encoder_enum == 2);
/* parse the lcd record table */
if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
bool bad_record = false;
u8 *record;
if ((frev == 1) && (crev < 2))
/* absolute */
record = (u8 *)(mode_info->atom_context->bios +
le16_to_cpu(lvds_info->info.usModePatchTableOffset));
else
/* relative */
record = (u8 *)(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(lvds_info->info.usModePatchTableOffset));
while (*record != ATOM_RECORD_END_TYPE) {
switch (*record) {
case LCD_MODE_PATCH_RECORD_MODE_TYPE:
record += sizeof(ATOM_PATCH_RECORD_MODE);
break;
case LCD_RTS_RECORD_TYPE:
record += sizeof(ATOM_LCD_RTS_RECORD);
break;
case LCD_CAP_RECORD_TYPE:
record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
break;
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
struct edid *edid;
int edid_size =
max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
edid = kmalloc(edid_size, GFP_KERNEL);
if (edid) {
memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
fake_edid_record->ucFakeEDIDLength);
if (drm_edid_is_valid(edid)) {
adev->mode_info.bios_hardcoded_edid = edid;
adev->mode_info.bios_hardcoded_edid_size = edid_size;
} else
kfree(edid);
}
}
record += fake_edid_record->ucFakeEDIDLength ?
struct_size(fake_edid_record,
ucFakeEDIDString,
fake_edid_record->ucFakeEDIDLength) :
/* empty fake edid record must be 3 bytes long */
sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
lvds->native_mode.width_mm = panel_res_record->usHSize;
lvds->native_mode.height_mm = panel_res_record->usVSize;
record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
break;
default:
DRM_ERROR("Bad LCD record %d\n", *record);
bad_record = true;
break;
}
if (bad_record)
break;
}
}
}
return lvds;
}
struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_dig_info(struct amdgpu_encoder *amdgpu_encoder)
{
int encoder_enum = (amdgpu_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct amdgpu_encoder_atom_dig *dig = kzalloc(sizeof(struct amdgpu_encoder_atom_dig), GFP_KERNEL);
if (!dig)
return NULL;
/* coherent mode by default */
dig->coherent_mode = true;
dig->dig_encoder = -1;
dig->linkb = (encoder_enum == 2);
return dig;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/atombios_encoders.c |
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "atom.h"
#include "amd_pcie.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "cik_ih.h"
#include "dce_v8_0.h"
#include "gfx_v7_0.h"
#include "cik_sdma.h"
#include "uvd_v4_2.h"
#include "vce_v2_0.h"
#include "cik_dpm.h"
#include "uvd/uvd_4_2_d.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_vkms.h"
static const struct amdgpu_video_codec_info cik_video_codecs_encode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 2048,
.max_height = 1152,
.max_pixels_per_frame = 2048 * 1152,
.max_level = 0,
},
};
static const struct amdgpu_video_codecs cik_video_codecs_encode =
{
.codec_count = ARRAY_SIZE(cik_video_codecs_encode_array),
.codec_array = cik_video_codecs_encode_array,
};
static const struct amdgpu_video_codec_info cik_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
.max_width = 2048,
.max_height = 1152,
.max_pixels_per_frame = 2048 * 1152,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
.max_width = 2048,
.max_height = 1152,
.max_pixels_per_frame = 2048 * 1152,
.max_level = 5,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
.max_width = 2048,
.max_height = 1152,
.max_pixels_per_frame = 2048 * 1152,
.max_level = 41,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
.max_width = 2048,
.max_height = 1152,
.max_pixels_per_frame = 2048 * 1152,
.max_level = 4,
},
};
static const struct amdgpu_video_codecs cik_video_codecs_decode =
{
.codec_count = ARRAY_SIZE(cik_video_codecs_decode_array),
.codec_array = cik_video_codecs_decode_array,
};
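/**
 * cik_query_video_codecs - query the supported video codecs
 *
 * @adev: amdgpu_device pointer
 * @encode: true for encode capabilities, false for decode
 * @codecs: returned codec capability table
 *
 * Returns the encode or decode codec table for the given CIK asic,
 * or -EINVAL for unsupported asics.
 */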
static int cik_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
if (encode)
*codecs = &cik_video_codecs_encode;
else
*codecs = &cik_video_codecs_decode;
return 0;
default:
return -EINVAL;
}
}
/*
* Indirect registers accessor
*/
static u32 cik_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(mmPCIE_INDEX, reg);
(void)RREG32(mmPCIE_INDEX);
r = RREG32(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
static void cik_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(mmPCIE_INDEX, reg);
(void)RREG32(mmPCIE_INDEX);
WREG32(mmPCIE_DATA, v);
(void)RREG32(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
static u32 cik_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(mmSMC_IND_INDEX_0, (reg));
r = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
static void cik_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
WREG32(mmSMC_IND_INDEX_0, (reg));
WREG32(mmSMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
static u32 cik_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(mmUVD_CTX_DATA);
spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
return r;
}
static void cik_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(mmUVD_CTX_DATA, (v));
spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
static u32 cik_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->didt_idx_lock, flags);
WREG32(mmDIDT_IND_INDEX, (reg));
r = RREG32(mmDIDT_IND_DATA);
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
return r;
}
static void cik_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->didt_idx_lock, flags);
WREG32(mmDIDT_IND_INDEX, (reg));
WREG32(mmDIDT_IND_DATA, (v));
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static const u32 bonaire_golden_spm_registers[] =
{
0xc200, 0xe0ffffff, 0xe0000000
};
static const u32 bonaire_golden_common_registers[] =
{
0x31dc, 0xffffffff, 0x00000800,
0x31dd, 0xffffffff, 0x00000800,
0x31e6, 0xffffffff, 0x00007fbf,
0x31e7, 0xffffffff, 0x00007faf
};
static const u32 bonaire_golden_registers[] =
{
0xcd5, 0x00000333, 0x00000333,
0xcd4, 0x000c0fc0, 0x00040200,
0x2684, 0x00010000, 0x00058208,
0xf000, 0xffff1fff, 0x00140000,
0xf080, 0xfdfc0fff, 0x00000100,
0xf08d, 0x40000000, 0x40000200,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x31e, 0x00000080, 0x00000000,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0xf0311fff, 0x80300000,
0x263e, 0x73773777, 0x12010001,
0xd43, 0x00810000, 0x408af000,
0x1c0c, 0x31000111, 0x00000011,
0xbd2, 0x73773777, 0x12010001,
0x883, 0x00007fb6, 0x0021a1b1,
0x884, 0x00007fb6, 0x002021b1,
0x860, 0x00007fb6, 0x00002191,
0x886, 0x00007fb6, 0x002121b1,
0x887, 0x00007fb6, 0x002021b1,
0x877, 0x00007fb6, 0x00002191,
0x878, 0x00007fb6, 0x00002191,
0xd8a, 0x0000003f, 0x0000000a,
0xd8b, 0x0000003f, 0x0000000a,
0xab9, 0x00073ffe, 0x000022a2,
0x903, 0x000007ff, 0x00000000,
0x2285, 0xf000003f, 0x00000007,
0x22fc, 0x00002001, 0x00000001,
0x22c9, 0xffffffff, 0x00ffffff,
0xc281, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x06000000,
0x136, 0x00000fff, 0x00000100,
0xf9e, 0x00000001, 0x00000002,
0x2440, 0x03000000, 0x0362c688,
0x2300, 0x000000ff, 0x00000001,
0x390, 0x00001fff, 0x00001fff,
0x2418, 0x0000007f, 0x00000020,
0x2542, 0x00010000, 0x00010000,
0x2b05, 0x000003ff, 0x000000f3,
0x2b03, 0xffffffff, 0x00001032
};
static const u32 bonaire_mgcg_cgcg_init[] =
{
0x3108, 0xffffffff, 0xfffffffc,
0xc200, 0xffffffff, 0xe0000000,
0xf0a8, 0xffffffff, 0x00000100,
0xf082, 0xffffffff, 0x00000100,
0xf0b0, 0xffffffff, 0xc0000100,
0xf0b2, 0xffffffff, 0xc0000100,
0xf0b1, 0xffffffff, 0xc0000100,
0x1579, 0xffffffff, 0x00600100,
0xf0a0, 0xffffffff, 0x00000100,
0xf085, 0xffffffff, 0x06000100,
0xf088, 0xffffffff, 0x00000100,
0xf086, 0xffffffff, 0x06000100,
0xf081, 0xffffffff, 0x00000100,
0xf0b8, 0xffffffff, 0x00000100,
0xf089, 0xffffffff, 0x00000100,
0xf080, 0xffffffff, 0x00000100,
0xf08c, 0xffffffff, 0x00000100,
0xf08d, 0xffffffff, 0x00000100,
0xf094, 0xffffffff, 0x00000100,
0xf095, 0xffffffff, 0x00000100,
0xf096, 0xffffffff, 0x00000100,
0xf097, 0xffffffff, 0x00000100,
0xf098, 0xffffffff, 0x00000100,
0xf09f, 0xffffffff, 0x00000100,
0xf09e, 0xffffffff, 0x00000100,
0xf084, 0xffffffff, 0x06000100,
0xf0a4, 0xffffffff, 0x00000100,
0xf09d, 0xffffffff, 0x00000100,
0xf0ad, 0xffffffff, 0x00000100,
0xf0ac, 0xffffffff, 0x00000100,
0xf09c, 0xffffffff, 0x00000100,
0xc200, 0xffffffff, 0xe0000000,
0xf008, 0xffffffff, 0x00010000,
0xf009, 0xffffffff, 0x00030002,
0xf00a, 0xffffffff, 0x00040007,
0xf00b, 0xffffffff, 0x00060005,
0xf00c, 0xffffffff, 0x00090008,
0xf00d, 0xffffffff, 0x00010000,
0xf00e, 0xffffffff, 0x00030002,
0xf00f, 0xffffffff, 0x00040007,
0xf010, 0xffffffff, 0x00060005,
0xf011, 0xffffffff, 0x00090008,
0xf012, 0xffffffff, 0x00010000,
0xf013, 0xffffffff, 0x00030002,
0xf014, 0xffffffff, 0x00040007,
0xf015, 0xffffffff, 0x00060005,
0xf016, 0xffffffff, 0x00090008,
0xf017, 0xffffffff, 0x00010000,
0xf018, 0xffffffff, 0x00030002,
0xf019, 0xffffffff, 0x00040007,
0xf01a, 0xffffffff, 0x00060005,
0xf01b, 0xffffffff, 0x00090008,
0xf01c, 0xffffffff, 0x00010000,
0xf01d, 0xffffffff, 0x00030002,
0xf01e, 0xffffffff, 0x00040007,
0xf01f, 0xffffffff, 0x00060005,
0xf020, 0xffffffff, 0x00090008,
0xf021, 0xffffffff, 0x00010000,
0xf022, 0xffffffff, 0x00030002,
0xf023, 0xffffffff, 0x00040007,
0xf024, 0xffffffff, 0x00060005,
0xf025, 0xffffffff, 0x00090008,
0xf026, 0xffffffff, 0x00010000,
0xf027, 0xffffffff, 0x00030002,
0xf028, 0xffffffff, 0x00040007,
0xf029, 0xffffffff, 0x00060005,
0xf02a, 0xffffffff, 0x00090008,
0xf000, 0xffffffff, 0x96e00200,
0x21c2, 0xffffffff, 0x00900100,
0x3109, 0xffffffff, 0x0020003f,
0xe, 0xffffffff, 0x0140001c,
0xf, 0x000f0000, 0x000f0000,
0x88, 0xffffffff, 0xc060000c,
0x89, 0xc0000fff, 0x00000100,
0x3e4, 0xffffffff, 0x00000100,
0x3e6, 0x00000101, 0x00000000,
0x82a, 0xffffffff, 0x00000104,
0x1579, 0xff000fff, 0x00000100,
0xc33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3403, 0xff000ff0, 0x00000100,
0x3603, 0xff000ff0, 0x00000100
};
static const u32 spectre_golden_spm_registers[] =
{
0xc200, 0xe0ffffff, 0xe0000000
};
static const u32 spectre_golden_common_registers[] =
{
0x31dc, 0xffffffff, 0x00000800,
0x31dd, 0xffffffff, 0x00000800,
0x31e6, 0xffffffff, 0x00007fbf,
0x31e7, 0xffffffff, 0x00007faf
};
static const u32 spectre_golden_registers[] =
{
0xf000, 0xffff1fff, 0x96940200,
0xf003, 0xffff0001, 0xff000000,
0xf080, 0xfffc0fff, 0x00000100,
0x1bb6, 0x00010101, 0x00010000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0xfffffffc, 0x00020200,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0xf0311fff, 0x80300000,
0x263e, 0x73773777, 0x12010001,
0x26df, 0x00ff0000, 0x00fc0000,
0xbd2, 0x73773777, 0x12010001,
0x2285, 0xf000003f, 0x00000007,
0x22c9, 0xffffffff, 0x00ffffff,
0xa0d4, 0x3f3f3fff, 0x00000082,
0xa0d5, 0x0000003f, 0x00000000,
0xf9e, 0x00000001, 0x00000002,
0x244f, 0xffff03df, 0x00000004,
0x31da, 0x00000008, 0x00000008,
0x2300, 0x000008ff, 0x00000800,
0x2542, 0x00010000, 0x00010000,
0x2b03, 0xffffffff, 0x54763210,
0x853e, 0x01ff01ff, 0x00000002,
0x8526, 0x007ff800, 0x00200000,
0x8057, 0xffffffff, 0x00000f40,
0xc24d, 0xffffffff, 0x00000001
};
static const u32 spectre_mgcg_cgcg_init[] =
{
0x3108, 0xffffffff, 0xfffffffc,
0xc200, 0xffffffff, 0xe0000000,
0xf0a8, 0xffffffff, 0x00000100,
0xf082, 0xffffffff, 0x00000100,
0xf0b0, 0xffffffff, 0x00000100,
0xf0b2, 0xffffffff, 0x00000100,
0xf0b1, 0xffffffff, 0x00000100,
0x1579, 0xffffffff, 0x00600100,
0xf0a0, 0xffffffff, 0x00000100,
0xf085, 0xffffffff, 0x06000100,
0xf088, 0xffffffff, 0x00000100,
0xf086, 0xffffffff, 0x06000100,
0xf081, 0xffffffff, 0x00000100,
0xf0b8, 0xffffffff, 0x00000100,
0xf089, 0xffffffff, 0x00000100,
0xf080, 0xffffffff, 0x00000100,
0xf08c, 0xffffffff, 0x00000100,
0xf08d, 0xffffffff, 0x00000100,
0xf094, 0xffffffff, 0x00000100,
0xf095, 0xffffffff, 0x00000100,
0xf096, 0xffffffff, 0x00000100,
0xf097, 0xffffffff, 0x00000100,
0xf098, 0xffffffff, 0x00000100,
0xf09f, 0xffffffff, 0x00000100,
0xf09e, 0xffffffff, 0x00000100,
0xf084, 0xffffffff, 0x06000100,
0xf0a4, 0xffffffff, 0x00000100,
0xf09d, 0xffffffff, 0x00000100,
0xf0ad, 0xffffffff, 0x00000100,
0xf0ac, 0xffffffff, 0x00000100,
0xf09c, 0xffffffff, 0x00000100,
0xc200, 0xffffffff, 0xe0000000,
0xf008, 0xffffffff, 0x00010000,
0xf009, 0xffffffff, 0x00030002,
0xf00a, 0xffffffff, 0x00040007,
0xf00b, 0xffffffff, 0x00060005,
0xf00c, 0xffffffff, 0x00090008,
0xf00d, 0xffffffff, 0x00010000,
0xf00e, 0xffffffff, 0x00030002,
0xf00f, 0xffffffff, 0x00040007,
0xf010, 0xffffffff, 0x00060005,
0xf011, 0xffffffff, 0x00090008,
0xf012, 0xffffffff, 0x00010000,
0xf013, 0xffffffff, 0x00030002,
0xf014, 0xffffffff, 0x00040007,
0xf015, 0xffffffff, 0x00060005,
0xf016, 0xffffffff, 0x00090008,
0xf017, 0xffffffff, 0x00010000,
0xf018, 0xffffffff, 0x00030002,
0xf019, 0xffffffff, 0x00040007,
0xf01a, 0xffffffff, 0x00060005,
0xf01b, 0xffffffff, 0x00090008,
0xf01c, 0xffffffff, 0x00010000,
0xf01d, 0xffffffff, 0x00030002,
0xf01e, 0xffffffff, 0x00040007,
0xf01f, 0xffffffff, 0x00060005,
0xf020, 0xffffffff, 0x00090008,
0xf021, 0xffffffff, 0x00010000,
0xf022, 0xffffffff, 0x00030002,
0xf023, 0xffffffff, 0x00040007,
0xf024, 0xffffffff, 0x00060005,
0xf025, 0xffffffff, 0x00090008,
0xf026, 0xffffffff, 0x00010000,
0xf027, 0xffffffff, 0x00030002,
0xf028, 0xffffffff, 0x00040007,
0xf029, 0xffffffff, 0x00060005,
0xf02a, 0xffffffff, 0x00090008,
0xf02b, 0xffffffff, 0x00010000,
0xf02c, 0xffffffff, 0x00030002,
0xf02d, 0xffffffff, 0x00040007,
0xf02e, 0xffffffff, 0x00060005,
0xf02f, 0xffffffff, 0x00090008,
0xf000, 0xffffffff, 0x96e00200,
0x21c2, 0xffffffff, 0x00900100,
0x3109, 0xffffffff, 0x0020003f,
0xe, 0xffffffff, 0x0140001c,
0xf, 0x000f0000, 0x000f0000,
0x88, 0xffffffff, 0xc060000c,
0x89, 0xc0000fff, 0x00000100,
0x3e4, 0xffffffff, 0x00000100,
0x3e6, 0x00000101, 0x00000000,
0x82a, 0xffffffff, 0x00000104,
0x1579, 0xff000fff, 0x00000100,
0xc33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3403, 0xff000ff0, 0x00000100,
0x3603, 0xff000ff0, 0x00000100
};
static const u32 kalindi_golden_spm_registers[] =
{
0xc200, 0xe0ffffff, 0xe0000000
};
static const u32 kalindi_golden_common_registers[] =
{
0x31dc, 0xffffffff, 0x00000800,
0x31dd, 0xffffffff, 0x00000800,
0x31e6, 0xffffffff, 0x00007fbf,
0x31e7, 0xffffffff, 0x00007faf
};
static const u32 kalindi_golden_registers[] =
{
0xf000, 0xffffdfff, 0x6e944040,
0x1579, 0xff607fff, 0xfc000100,
0xf088, 0xff000fff, 0x00000100,
0xf089, 0xff000fff, 0x00000100,
0xf080, 0xfffc0fff, 0x00000100,
0x1bb6, 0x00010101, 0x00010000,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0xf0311fff, 0x80300000,
0x263e, 0x73773777, 0x12010001,
0x263f, 0xffffffff, 0x00000010,
0x26df, 0x00ff0000, 0x00fc0000,
0x200c, 0x00001f0f, 0x0000100a,
0xbd2, 0x73773777, 0x12010001,
0x902, 0x000fffff, 0x000c007f,
0x2285, 0xf000003f, 0x00000007,
0x22c9, 0x3fff3fff, 0x00ffcfff,
0xc281, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x06000000,
0x136, 0x00000fff, 0x00000100,
0xf9e, 0x00000001, 0x00000002,
0x31da, 0x00000008, 0x00000008,
0x2300, 0x000000ff, 0x00000003,
0x853e, 0x01ff01ff, 0x00000002,
0x8526, 0x007ff800, 0x00200000,
0x8057, 0xffffffff, 0x00000f40,
0x2231, 0x001f3ae3, 0x00000082,
0x2235, 0x0000001f, 0x00000010,
0xc24d, 0xffffffff, 0x00000000
};
static const u32 kalindi_mgcg_cgcg_init[] =
{
0x3108, 0xffffffff, 0xfffffffc,
0xc200, 0xffffffff, 0xe0000000,
0xf0a8, 0xffffffff, 0x00000100,
0xf082, 0xffffffff, 0x00000100,
0xf0b0, 0xffffffff, 0x00000100,
0xf0b2, 0xffffffff, 0x00000100,
0xf0b1, 0xffffffff, 0x00000100,
0x1579, 0xffffffff, 0x00600100,
0xf0a0, 0xffffffff, 0x00000100,
0xf085, 0xffffffff, 0x06000100,
0xf088, 0xffffffff, 0x00000100,
0xf086, 0xffffffff, 0x06000100,
0xf081, 0xffffffff, 0x00000100,
0xf0b8, 0xffffffff, 0x00000100,
0xf089, 0xffffffff, 0x00000100,
0xf080, 0xffffffff, 0x00000100,
0xf08c, 0xffffffff, 0x00000100,
0xf08d, 0xffffffff, 0x00000100,
0xf094, 0xffffffff, 0x00000100,
0xf095, 0xffffffff, 0x00000100,
0xf096, 0xffffffff, 0x00000100,
0xf097, 0xffffffff, 0x00000100,
0xf098, 0xffffffff, 0x00000100,
0xf09f, 0xffffffff, 0x00000100,
0xf09e, 0xffffffff, 0x00000100,
0xf084, 0xffffffff, 0x06000100,
0xf0a4, 0xffffffff, 0x00000100,
0xf09d, 0xffffffff, 0x00000100,
0xf0ad, 0xffffffff, 0x00000100,
0xf0ac, 0xffffffff, 0x00000100,
0xf09c, 0xffffffff, 0x00000100,
0xc200, 0xffffffff, 0xe0000000,
0xf008, 0xffffffff, 0x00010000,
0xf009, 0xffffffff, 0x00030002,
0xf00a, 0xffffffff, 0x00040007,
0xf00b, 0xffffffff, 0x00060005,
0xf00c, 0xffffffff, 0x00090008,
0xf00d, 0xffffffff, 0x00010000,
0xf00e, 0xffffffff, 0x00030002,
0xf00f, 0xffffffff, 0x00040007,
0xf010, 0xffffffff, 0x00060005,
0xf011, 0xffffffff, 0x00090008,
0xf000, 0xffffffff, 0x96e00200,
0x21c2, 0xffffffff, 0x00900100,
0x3109, 0xffffffff, 0x0020003f,
0xe, 0xffffffff, 0x0140001c,
0xf, 0x000f0000, 0x000f0000,
0x88, 0xffffffff, 0xc060000c,
0x89, 0xc0000fff, 0x00000100,
0x82a, 0xffffffff, 0x00000104,
0x1579, 0xff000fff, 0x00000100,
0xc33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3403, 0xff000ff0, 0x00000100,
0x3603, 0xff000ff0, 0x00000100
};
static const u32 hawaii_golden_spm_registers[] =
{
0xc200, 0xe0ffffff, 0xe0000000
};
static const u32 hawaii_golden_common_registers[] =
{
0xc200, 0xffffffff, 0xe0000000,
0xa0d4, 0xffffffff, 0x3a00161a,
0xa0d5, 0xffffffff, 0x0000002e,
0x2684, 0xffffffff, 0x00018208,
0x263e, 0xffffffff, 0x12011003
};
static const u32 hawaii_golden_registers[] =
{
0xcd5, 0x00000333, 0x00000333,
0x2684, 0x00010000, 0x00058208,
0x260c, 0xffffffff, 0x00000000,
0x260d, 0xf00fffff, 0x00000400,
0x260e, 0x0002021c, 0x00020200,
0x31e, 0x00000080, 0x00000000,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0xf0311fff, 0x80300000,
0xd43, 0x00810000, 0x408af000,
0x1c0c, 0x31000111, 0x00000011,
0xbd2, 0x73773777, 0x12010001,
0x848, 0x0000007f, 0x0000001b,
0x877, 0x00007fb6, 0x00002191,
0xd8a, 0x0000003f, 0x0000000a,
0xd8b, 0x0000003f, 0x0000000a,
0xab9, 0x00073ffe, 0x000022a2,
0x903, 0x000007ff, 0x00000000,
0x22fc, 0x00002001, 0x00000001,
0x22c9, 0xffffffff, 0x00ffffff,
0xc281, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x06000000,
0xf9e, 0x00000001, 0x00000002,
0x31da, 0x00000008, 0x00000008,
0x31dc, 0x00000f00, 0x00000800,
0x31dd, 0x00000f00, 0x00000800,
0x31e6, 0x00ffffff, 0x00ff7fbf,
0x31e7, 0x00ffffff, 0x00ff7faf,
0x2300, 0x000000ff, 0x00000800,
0x390, 0x00001fff, 0x00001fff,
0x2418, 0x0000007f, 0x00000020,
0x2542, 0x00010000, 0x00010000,
0x2b80, 0x00100000, 0x000ff07c,
0x2b05, 0x000003ff, 0x0000000f,
0x2b04, 0xffffffff, 0x7564fdec,
0x2b03, 0xffffffff, 0x3120b9a8,
0x2b02, 0x20000000, 0x0f9c0000
};
static const u32 hawaii_mgcg_cgcg_init[] =
{
0x3108, 0xffffffff, 0xfffffffd,
0xc200, 0xffffffff, 0xe0000000,
0xf0a8, 0xffffffff, 0x00000100,
0xf082, 0xffffffff, 0x00000100,
0xf0b0, 0xffffffff, 0x00000100,
0xf0b2, 0xffffffff, 0x00000100,
0xf0b1, 0xffffffff, 0x00000100,
0x1579, 0xffffffff, 0x00200100,
0xf0a0, 0xffffffff, 0x00000100,
0xf085, 0xffffffff, 0x06000100,
0xf088, 0xffffffff, 0x00000100,
0xf086, 0xffffffff, 0x06000100,
0xf081, 0xffffffff, 0x00000100,
0xf0b8, 0xffffffff, 0x00000100,
0xf089, 0xffffffff, 0x00000100,
0xf080, 0xffffffff, 0x00000100,
0xf08c, 0xffffffff, 0x00000100,
0xf08d, 0xffffffff, 0x00000100,
0xf094, 0xffffffff, 0x00000100,
0xf095, 0xffffffff, 0x00000100,
0xf096, 0xffffffff, 0x00000100,
0xf097, 0xffffffff, 0x00000100,
0xf098, 0xffffffff, 0x00000100,
0xf09f, 0xffffffff, 0x00000100,
0xf09e, 0xffffffff, 0x00000100,
0xf084, 0xffffffff, 0x06000100,
0xf0a4, 0xffffffff, 0x00000100,
0xf09d, 0xffffffff, 0x00000100,
0xf0ad, 0xffffffff, 0x00000100,
0xf0ac, 0xffffffff, 0x00000100,
0xf09c, 0xffffffff, 0x00000100,
0xc200, 0xffffffff, 0xe0000000,
0xf008, 0xffffffff, 0x00010000,
0xf009, 0xffffffff, 0x00030002,
0xf00a, 0xffffffff, 0x00040007,
0xf00b, 0xffffffff, 0x00060005,
0xf00c, 0xffffffff, 0x00090008,
0xf00d, 0xffffffff, 0x00010000,
0xf00e, 0xffffffff, 0x00030002,
0xf00f, 0xffffffff, 0x00040007,
0xf010, 0xffffffff, 0x00060005,
0xf011, 0xffffffff, 0x00090008,
0xf012, 0xffffffff, 0x00010000,
0xf013, 0xffffffff, 0x00030002,
0xf014, 0xffffffff, 0x00040007,
0xf015, 0xffffffff, 0x00060005,
0xf016, 0xffffffff, 0x00090008,
0xf017, 0xffffffff, 0x00010000,
0xf018, 0xffffffff, 0x00030002,
0xf019, 0xffffffff, 0x00040007,
0xf01a, 0xffffffff, 0x00060005,
0xf01b, 0xffffffff, 0x00090008,
0xf01c, 0xffffffff, 0x00010000,
0xf01d, 0xffffffff, 0x00030002,
0xf01e, 0xffffffff, 0x00040007,
0xf01f, 0xffffffff, 0x00060005,
0xf020, 0xffffffff, 0x00090008,
0xf021, 0xffffffff, 0x00010000,
0xf022, 0xffffffff, 0x00030002,
0xf023, 0xffffffff, 0x00040007,
0xf024, 0xffffffff, 0x00060005,
0xf025, 0xffffffff, 0x00090008,
0xf026, 0xffffffff, 0x00010000,
0xf027, 0xffffffff, 0x00030002,
0xf028, 0xffffffff, 0x00040007,
0xf029, 0xffffffff, 0x00060005,
0xf02a, 0xffffffff, 0x00090008,
0xf02b, 0xffffffff, 0x00010000,
0xf02c, 0xffffffff, 0x00030002,
0xf02d, 0xffffffff, 0x00040007,
0xf02e, 0xffffffff, 0x00060005,
0xf02f, 0xffffffff, 0x00090008,
0xf030, 0xffffffff, 0x00010000,
0xf031, 0xffffffff, 0x00030002,
0xf032, 0xffffffff, 0x00040007,
0xf033, 0xffffffff, 0x00060005,
0xf034, 0xffffffff, 0x00090008,
0xf035, 0xffffffff, 0x00010000,
0xf036, 0xffffffff, 0x00030002,
0xf037, 0xffffffff, 0x00040007,
0xf038, 0xffffffff, 0x00060005,
0xf039, 0xffffffff, 0x00090008,
0xf03a, 0xffffffff, 0x00010000,
0xf03b, 0xffffffff, 0x00030002,
0xf03c, 0xffffffff, 0x00040007,
0xf03d, 0xffffffff, 0x00060005,
0xf03e, 0xffffffff, 0x00090008,
0x30c6, 0xffffffff, 0x00020200,
0xcd4, 0xffffffff, 0x00000200,
0x570, 0xffffffff, 0x00000400,
0x157a, 0xffffffff, 0x00000000,
0xbd4, 0xffffffff, 0x00000902,
0xf000, 0xffffffff, 0x96940200,
0x21c2, 0xffffffff, 0x00900100,
0x3109, 0xffffffff, 0x0020003f,
0xe, 0xffffffff, 0x0140001c,
0xf, 0x000f0000, 0x000f0000,
0x88, 0xffffffff, 0xc060000c,
0x89, 0xc0000fff, 0x00000100,
0x3e4, 0xffffffff, 0x00000100,
0x3e6, 0x00000101, 0x00000000,
0x82a, 0xffffffff, 0x00000104,
0x1579, 0xff000fff, 0x00000100,
0xc33, 0xc0000fff, 0x00000104,
0x3079, 0x00000001, 0x00000001,
0x3403, 0xff000ff0, 0x00000100,
0x3603, 0xff000ff0, 0x00000100
};
static const u32 godavari_golden_registers[] =
{
0x1579, 0xff607fff, 0xfc000100,
0x1bb6, 0x00010101, 0x00010000,
0x260c, 0xffffffff, 0x00000000,
0x260c0, 0xf00fffff, 0x00000400,
0x184c, 0xffffffff, 0x00010000,
0x16ec, 0x000000f0, 0x00000070,
0x16f0, 0xf0311fff, 0x80300000,
0x263e, 0x73773777, 0x12010001,
0x263f, 0xffffffff, 0x00000010,
0x200c, 0x00001f0f, 0x0000100a,
0xbd2, 0x73773777, 0x12010001,
0x902, 0x000fffff, 0x000c007f,
0x2285, 0xf000003f, 0x00000007,
0x22c9, 0xffffffff, 0x00ff0fff,
0xc281, 0x0000ff0f, 0x00000000,
0xa293, 0x07ffffff, 0x06000000,
0x136, 0x00000fff, 0x00000100,
0x3405, 0x00010000, 0x00810001,
0x3605, 0x00010000, 0x00810001,
0xf9e, 0x00000001, 0x00000002,
0x31da, 0x00000008, 0x00000008,
0x31dc, 0x00000f00, 0x00000800,
0x31dd, 0x00000f00, 0x00000800,
0x31e6, 0x00ffffff, 0x00ff7fbf,
0x31e7, 0x00ffffff, 0x00ff7faf,
0x2300, 0x000000ff, 0x00000001,
0x853e, 0x01ff01ff, 0x00000002,
0x8526, 0x007ff800, 0x00200000,
0x8057, 0xffffffff, 0x00000f40,
0x2231, 0x001f3ae3, 0x00000082,
0x2235, 0x0000001f, 0x00000010,
0xc24d, 0xffffffff, 0x00000000
};
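/**
 * cik_init_golden_registers - program the golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Programs the asic specific "golden" register sequences (clockgating
 * init, golden, common and SPM registers) while holding the GRBM
 * index mutex.
 */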
static void cik_init_golden_registers(struct amdgpu_device *adev)
{
/* Some of the registers might be dependent on GRBM_GFX_INDEX */
mutex_lock(&adev->grbm_idx_mutex);
switch (adev->asic_type) {
case CHIP_BONAIRE:
amdgpu_device_program_register_sequence(adev,
bonaire_mgcg_cgcg_init,
ARRAY_SIZE(bonaire_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
bonaire_golden_registers,
ARRAY_SIZE(bonaire_golden_registers));
amdgpu_device_program_register_sequence(adev,
bonaire_golden_common_registers,
ARRAY_SIZE(bonaire_golden_common_registers));
amdgpu_device_program_register_sequence(adev,
bonaire_golden_spm_registers,
ARRAY_SIZE(bonaire_golden_spm_registers));
break;
case CHIP_KABINI:
amdgpu_device_program_register_sequence(adev,
kalindi_mgcg_cgcg_init,
ARRAY_SIZE(kalindi_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
kalindi_golden_registers,
ARRAY_SIZE(kalindi_golden_registers));
amdgpu_device_program_register_sequence(adev,
kalindi_golden_common_registers,
ARRAY_SIZE(kalindi_golden_common_registers));
amdgpu_device_program_register_sequence(adev,
kalindi_golden_spm_registers,
ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_MULLINS:
amdgpu_device_program_register_sequence(adev,
kalindi_mgcg_cgcg_init,
ARRAY_SIZE(kalindi_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
godavari_golden_registers,
ARRAY_SIZE(godavari_golden_registers));
amdgpu_device_program_register_sequence(adev,
kalindi_golden_common_registers,
ARRAY_SIZE(kalindi_golden_common_registers));
amdgpu_device_program_register_sequence(adev,
kalindi_golden_spm_registers,
ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_KAVERI:
amdgpu_device_program_register_sequence(adev,
spectre_mgcg_cgcg_init,
ARRAY_SIZE(spectre_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
spectre_golden_registers,
ARRAY_SIZE(spectre_golden_registers));
amdgpu_device_program_register_sequence(adev,
spectre_golden_common_registers,
ARRAY_SIZE(spectre_golden_common_registers));
amdgpu_device_program_register_sequence(adev,
spectre_golden_spm_registers,
ARRAY_SIZE(spectre_golden_spm_registers));
break;
case CHIP_HAWAII:
amdgpu_device_program_register_sequence(adev,
hawaii_mgcg_cgcg_init,
ARRAY_SIZE(hawaii_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
hawaii_golden_registers,
ARRAY_SIZE(hawaii_golden_registers));
amdgpu_device_program_register_sequence(adev,
hawaii_golden_common_registers,
ARRAY_SIZE(hawaii_golden_common_registers));
amdgpu_device_program_register_sequence(adev,
hawaii_golden_spm_registers,
ARRAY_SIZE(hawaii_golden_spm_registers));
break;
default:
break;
}
mutex_unlock(&adev->grbm_idx_mutex);
}
/**
* cik_get_xclk - get the xclk
*
* @adev: amdgpu_device pointer
*
* Returns the reference clock used by the gfx engine
* (CIK).
*/
static u32 cik_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->flags & AMD_IS_APU) {
if (RREG32_SMC(ixGENERAL_PWRMGT) & GENERAL_PWRMGT__GPU_COUNTER_CLK_MASK)
return reference_clock / 2;
} else {
if (RREG32_SMC(ixCG_CLKPIN_CNTL) & CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK)
return reference_clock / 4;
}
return reference_clock;
}
/**
* cik_srbm_select - select specific register instances
*
* @adev: amdgpu_device pointer
* @me: selected ME (micro engine)
* @pipe: pipe
* @queue: queue
* @vmid: VMID
*
 * Switches the currently active register instances. Some
* registers are instanced per VMID, others are instanced per
* me/pipe/queue combination.
*/
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid)
{
u32 srbm_gfx_cntl =
(((pipe << SRBM_GFX_CNTL__PIPEID__SHIFT) & SRBM_GFX_CNTL__PIPEID_MASK)|
((me << SRBM_GFX_CNTL__MEID__SHIFT) & SRBM_GFX_CNTL__MEID_MASK)|
((vmid << SRBM_GFX_CNTL__VMID__SHIFT) & SRBM_GFX_CNTL__VMID_MASK)|
((queue << SRBM_GFX_CNTL__QUEUEID__SHIFT) & SRBM_GFX_CNTL__QUEUEID_MASK));
WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
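/**
 * cik_vga_set_state - enable/disable vga decode
 *
 * @adev: amdgpu_device pointer
 * @state: true to enable vga decode, false to disable it
 *
 * Sets or clears the VGA_DIS bit in CONFIG_CNTL accordingly.
 */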
static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
{
uint32_t tmp;
tmp = RREG32(mmCONFIG_CNTL);
if (!state)
tmp |= CONFIG_CNTL__VGA_DIS_MASK;
else
tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
WREG32(mmCONFIG_CNTL, tmp);
}
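/**
 * cik_read_disabled_bios - read the vbios while the rom is disabled
 *
 * @adev: amdgpu_device pointer
 *
 * Temporarily enables the rom and disables VGA access, reads the
 * vbios, then restores the saved register state.
 * Returns true if the bios was read successfully.
 */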
static bool cik_read_disabled_bios(struct amdgpu_device *adev)
{
u32 bus_cntl;
u32 d1vga_control = 0;
u32 d2vga_control = 0;
u32 vga_render_control = 0;
u32 rom_cntl;
bool r;
bus_cntl = RREG32(mmBUS_CNTL);
if (adev->mode_info.num_crtc) {
d1vga_control = RREG32(mmD1VGA_CONTROL);
d2vga_control = RREG32(mmD2VGA_CONTROL);
vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
}
rom_cntl = RREG32_SMC(ixROM_CNTL);
/* enable the rom */
WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
if (adev->mode_info.num_crtc) {
/* Disable VGA mode */
WREG32(mmD1VGA_CONTROL,
(d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
WREG32(mmD2VGA_CONTROL,
(d2vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
WREG32(mmVGA_RENDER_CONTROL,
(vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
}
WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
r = amdgpu_read_bios(adev);
/* restore regs */
WREG32(mmBUS_CNTL, bus_cntl);
if (adev->mode_info.num_crtc) {
WREG32(mmD1VGA_CONTROL, d1vga_control);
WREG32(mmD2VGA_CONTROL, d2vga_control);
WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
}
WREG32_SMC(ixROM_CNTL, rom_cntl);
return r;
}
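/**
 * cik_read_bios_from_rom - read the vbios directly from the rom
 *
 * @adev: amdgpu_device pointer
 * @bios: destination buffer
 * @length_bytes: number of bytes to read
 *
 * Reads the vbios image dword by dword through the SMC index/data
 * registers. Not supported on APUs, where the vbios is part of the
 * system bios image.
 * Returns true on success.
 */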
static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
u32 *dw_ptr;
unsigned long flags;
u32 i, length_dw;
if (bios == NULL)
return false;
if (length_bytes == 0)
return false;
/* APU vbios image is part of sbios image */
if (adev->flags & AMD_IS_APU)
return false;
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
WREG32(mmSMC_IND_DATA_0, 0);
/* set index to data for continuous read */
WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
}
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS},
{mmGRBM_STATUS2},
{mmGRBM_STATUS_SE0},
{mmGRBM_STATUS_SE1},
{mmGRBM_STATUS_SE2},
{mmGRBM_STATUS_SE3},
{mmSRBM_STATUS},
{mmSRBM_STATUS2},
{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
{mmCP_STAT},
{mmCP_STALLED_STAT1},
{mmCP_STALLED_STAT2},
{mmCP_STALLED_STAT3},
{mmCP_CPF_BUSY_STAT},
{mmCP_CPF_STALLED_STAT1},
{mmCP_CPF_STATUS},
{mmCP_CPC_BUSY_STAT},
{mmCP_CPC_STALLED_STAT1},
{mmCP_CPC_STATUS},
{mmGB_ADDR_CONFIG},
{mmMC_ARB_RAMCFG},
{mmGB_TILE_MODE0},
{mmGB_TILE_MODE1},
{mmGB_TILE_MODE2},
{mmGB_TILE_MODE3},
{mmGB_TILE_MODE4},
{mmGB_TILE_MODE5},
{mmGB_TILE_MODE6},
{mmGB_TILE_MODE7},
{mmGB_TILE_MODE8},
{mmGB_TILE_MODE9},
{mmGB_TILE_MODE10},
{mmGB_TILE_MODE11},
{mmGB_TILE_MODE12},
{mmGB_TILE_MODE13},
{mmGB_TILE_MODE14},
{mmGB_TILE_MODE15},
{mmGB_TILE_MODE16},
{mmGB_TILE_MODE17},
{mmGB_TILE_MODE18},
{mmGB_TILE_MODE19},
{mmGB_TILE_MODE20},
{mmGB_TILE_MODE21},
{mmGB_TILE_MODE22},
{mmGB_TILE_MODE23},
{mmGB_TILE_MODE24},
{mmGB_TILE_MODE25},
{mmGB_TILE_MODE26},
{mmGB_TILE_MODE27},
{mmGB_TILE_MODE28},
{mmGB_TILE_MODE29},
{mmGB_TILE_MODE30},
{mmGB_TILE_MODE31},
{mmGB_MACROTILE_MODE0},
{mmGB_MACROTILE_MODE1},
{mmGB_MACROTILE_MODE2},
{mmGB_MACROTILE_MODE3},
{mmGB_MACROTILE_MODE4},
{mmGB_MACROTILE_MODE5},
{mmGB_MACROTILE_MODE6},
{mmGB_MACROTILE_MODE7},
{mmGB_MACROTILE_MODE8},
{mmGB_MACROTILE_MODE9},
{mmGB_MACROTILE_MODE10},
{mmGB_MACROTILE_MODE11},
{mmGB_MACROTILE_MODE12},
{mmGB_MACROTILE_MODE13},
{mmGB_MACROTILE_MODE14},
{mmGB_MACROTILE_MODE15},
{mmCC_RB_BACKEND_DISABLE, true},
{mmGC_USER_RB_BACKEND_DISABLE, true},
{mmGB_BACKEND_MAP, false},
{mmPA_SC_RASTER_CONFIG, true},
{mmPA_SC_RASTER_CONFIG_1, true},
};
static uint32_t cik_get_register_value(struct amdgpu_device *adev,
bool indexed, u32 se_num,
u32 sh_num, u32 reg_offset)
{
if (indexed) {
uint32_t val;
unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
switch (reg_offset) {
case mmCC_RB_BACKEND_DISABLE:
return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
case mmGC_USER_RB_BACKEND_DISABLE:
return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
case mmPA_SC_RASTER_CONFIG:
return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
case mmPA_SC_RASTER_CONFIG_1:
return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
}
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
unsigned idx;
switch (reg_offset) {
case mmGB_ADDR_CONFIG:
return adev->gfx.config.gb_addr_config;
case mmMC_ARB_RAMCFG:
return adev->gfx.config.mc_arb_ramcfg;
case mmGB_TILE_MODE0:
case mmGB_TILE_MODE1:
case mmGB_TILE_MODE2:
case mmGB_TILE_MODE3:
case mmGB_TILE_MODE4:
case mmGB_TILE_MODE5:
case mmGB_TILE_MODE6:
case mmGB_TILE_MODE7:
case mmGB_TILE_MODE8:
case mmGB_TILE_MODE9:
case mmGB_TILE_MODE10:
case mmGB_TILE_MODE11:
case mmGB_TILE_MODE12:
case mmGB_TILE_MODE13:
case mmGB_TILE_MODE14:
case mmGB_TILE_MODE15:
case mmGB_TILE_MODE16:
case mmGB_TILE_MODE17:
case mmGB_TILE_MODE18:
case mmGB_TILE_MODE19:
case mmGB_TILE_MODE20:
case mmGB_TILE_MODE21:
case mmGB_TILE_MODE22:
case mmGB_TILE_MODE23:
case mmGB_TILE_MODE24:
case mmGB_TILE_MODE25:
case mmGB_TILE_MODE26:
case mmGB_TILE_MODE27:
case mmGB_TILE_MODE28:
case mmGB_TILE_MODE29:
case mmGB_TILE_MODE30:
case mmGB_TILE_MODE31:
idx = (reg_offset - mmGB_TILE_MODE0);
return adev->gfx.config.tile_mode_array[idx];
case mmGB_MACROTILE_MODE0:
case mmGB_MACROTILE_MODE1:
case mmGB_MACROTILE_MODE2:
case mmGB_MACROTILE_MODE3:
case mmGB_MACROTILE_MODE4:
case mmGB_MACROTILE_MODE5:
case mmGB_MACROTILE_MODE6:
case mmGB_MACROTILE_MODE7:
case mmGB_MACROTILE_MODE8:
case mmGB_MACROTILE_MODE9:
case mmGB_MACROTILE_MODE10:
case mmGB_MACROTILE_MODE11:
case mmGB_MACROTILE_MODE12:
case mmGB_MACROTILE_MODE13:
case mmGB_MACROTILE_MODE14:
case mmGB_MACROTILE_MODE15:
idx = (reg_offset - mmGB_MACROTILE_MODE0);
return adev->gfx.config.macrotile_mode_array[idx];
default:
return RREG32(reg_offset);
}
}
}
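/**
 * cik_read_register - read a register for userspace queries
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: shader array to address
 * @reg_offset: register to read
 * @value: returned register value
 *
 * Only registers on the allowed list can be read; cached config
 * values are returned where available.
 * Returns 0 on success, -EINVAL if the register is not allowed.
 */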
static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
uint32_t i;
*value = 0;
for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
bool indexed = cik_allowed_read_registers[i].grbm_indexed;
if (reg_offset != cik_allowed_read_registers[i].reg_offset)
continue;
*value = cik_get_register_value(adev, indexed, se_num, sh_num,
reg_offset);
return 0;
}
return -EINVAL;
}
struct kv_reset_save_regs {
u32 gmcon_reng_execute;
u32 gmcon_misc;
u32 gmcon_misc3;
};
static void kv_save_regs_for_reset(struct amdgpu_device *adev,
struct kv_reset_save_regs *save)
{
save->gmcon_reng_execute = RREG32(mmGMCON_RENG_EXECUTE);
save->gmcon_misc = RREG32(mmGMCON_MISC);
save->gmcon_misc3 = RREG32(mmGMCON_MISC3);
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute &
~GMCON_RENG_EXECUTE__RENG_EXECUTE_ON_PWR_UP_MASK);
WREG32(mmGMCON_MISC, save->gmcon_misc &
~(GMCON_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK |
GMCON_MISC__STCTRL_STUTTER_EN_MASK));
}
static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
struct kv_reset_save_regs *save)
{
int i;
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_CONFIG, 0x200010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_CONFIG, 0x300010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x210000);
WREG32(mmGMCON_PGFSM_CONFIG, 0xa00010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x21003);
WREG32(mmGMCON_PGFSM_CONFIG, 0xb00010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x2b00);
WREG32(mmGMCON_PGFSM_CONFIG, 0xc00010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_CONFIG, 0xd00010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x420000);
WREG32(mmGMCON_PGFSM_CONFIG, 0x100010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x120202);
WREG32(mmGMCON_PGFSM_CONFIG, 0x500010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x3e3e36);
WREG32(mmGMCON_PGFSM_CONFIG, 0x600010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x373f3e);
WREG32(mmGMCON_PGFSM_CONFIG, 0x700010ff);
for (i = 0; i < 5; i++)
WREG32(mmGMCON_PGFSM_WRITE, 0);
WREG32(mmGMCON_PGFSM_WRITE, 0x3e1332);
WREG32(mmGMCON_PGFSM_CONFIG, 0xe00010ff);
WREG32(mmGMCON_MISC3, save->gmcon_misc3);
WREG32(mmGMCON_MISC, save->gmcon_misc);
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
/**
* cik_asic_pci_config_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
* Use PCI Config method to reset the GPU.
*
* Returns 0 for success.
*/
static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
int r = -EINVAL;
amdgpu_atombios_scratch_regs_engine_hung(adev, true);
if (adev->flags & AMD_IS_APU)
kv_save_regs_for_reset(adev, &kv_save);
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
amdgpu_device_pci_config_reset(adev);
udelay(100);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
/* enable BM */
pci_set_master(adev->pdev);
adev->has_hw_reset = true;
r = 0;
break;
}
udelay(1);
}
/* does asic init need to be run first??? */
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return r;
}
static bool cik_asic_supports_baco(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
return amdgpu_dpm_is_baco_supported(adev);
default:
return false;
}
}
static enum amd_reset_method
cik_asic_reset_method(struct amdgpu_device *adev)
{
bool baco_reset;
if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO)
return amdgpu_reset_method;
if (amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
baco_reset = cik_asic_supports_baco(adev);
break;
default:
baco_reset = false;
break;
}
if (baco_reset)
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_LEGACY;
}
/**
* cik_asic_reset - soft reset GPU
*
* @adev: amdgpu_device pointer
*
* Look up which blocks are hung and attempt
* to reset them.
* Returns 0 for success.
*/
static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
/* APUs don't have full asic reset */
if (adev->flags & AMD_IS_APU)
return 0;
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
} else {
dev_info(adev->dev, "PCI CONFIG reset\n");
r = cik_asic_pci_config_reset(adev);
}
return r;
}
static u32 cik_get_config_memsize(struct amdgpu_device *adev)
{
return RREG32(mmCONFIG_MEMSIZE);
}
static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
u32 cntl_reg, u32 status_reg)
{
int r, i;
struct atom_clock_dividers dividers;
uint32_t tmp;
r = amdgpu_atombios_get_clock_dividers(adev,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
clock, false, &dividers);
if (r)
return r;
tmp = RREG32_SMC(cntl_reg);
tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
tmp |= dividers.post_divider;
WREG32_SMC(cntl_reg, tmp);
for (i = 0; i < 100; i++) {
if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
break;
mdelay(10);
}
if (i == 100)
return -ETIMEDOUT;
return 0;
}
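/**
 * cik_set_uvd_clocks - set the UVD clocks
 *
 * @adev: amdgpu_device pointer
 * @vclk: requested VCLK frequency
 * @dclk: requested DCLK frequency
 *
 * Programs the UVD VCLK and DCLK dividers.
 * Returns 0 on success, error on failure or timeout.
 */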
static int cik_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
int r = 0;
r = cik_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
if (r)
return r;
r = cik_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
return r;
}
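/**
 * cik_set_vce_clocks - set the VCE clocks
 *
 * @adev: amdgpu_device pointer
 * @evclk: requested EVCLK frequency
 * @ecclk: requested ECCLK frequency
 *
 * Programs the VCE ECLK divider and waits for the clock to settle.
 * Returns 0 on success, -ETIMEDOUT if the clock does not stabilize.
 */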
static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
int r, i;
struct atom_clock_dividers dividers;
u32 tmp;
r = amdgpu_atombios_get_clock_dividers(adev,
COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
ecclk, false, &dividers);
if (r)
return r;
for (i = 0; i < 100; i++) {
if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
break;
mdelay(10);
}
if (i == 100)
return -ETIMEDOUT;
tmp = RREG32_SMC(ixCG_ECLK_CNTL);
tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
tmp |= dividers.post_divider;
WREG32_SMC(ixCG_ECLK_CNTL, tmp);
for (i = 0; i < 100; i++) {
if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
break;
mdelay(10);
}
if (i == 100)
return -ETIMEDOUT;
return 0;
}
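/**
 * cik_pcie_gen3_enable - enable higher pcie link speeds
 *
 * @adev: amdgpu_device pointer
 *
 * Attempts to bring the PCIe link up to gen2/gen3 speeds when both
 * the asic and the root port support them. No-op on APUs, root bus
 * devices, or when disabled via amdgpu.pcie_gen2=0.
 */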
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
if (pci_is_root_bus(adev->pdev->bus))
return;
if (amdgpu_pcie_gen2 == 0)
return;
if (adev->flags & AMD_IS_APU)
return;
if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH__SHIFT;
current_lw = (tmp & PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH_MASK)
>> PCIE_LC_STATUS1__LC_OPERATING_LINK_WIDTH__SHIFT;
if (current_lw < max_lw) {
tmp = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
if (tmp & PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATION_SUPPORT_MASK) {
tmp &= ~(PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_MASK |
PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_DIS_MASK);
tmp |= (max_lw <<
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH__SHIFT);
tmp |= PCIE_LC_LINK_WIDTH_CNTL__LC_UPCONFIGURE_SUPPORT_MASK |
PCIE_LC_LINK_WIDTH_CNTL__LC_RENEGOTIATE_EN_MASK |
PCIE_LC_LINK_WIDTH_CNTL__LC_RECONFIG_NOW_MASK;
WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, tmp);
}
}
for (i = 0; i < 10; i++) {
/* check status */
pcie_capability_read_word(adev->pdev,
PCI_EXP_DEVSTA,
&tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
pcie_capability_read_word(root, PCI_EXP_LNKCTL,
&bridge_cfg);
pcie_capability_read_word(adev->pdev,
PCI_EXP_LNKCTL,
&gpu_cfg);
pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
&bridge_cfg2);
pcie_capability_read_word(adev->pdev,
PCI_EXP_LNKCTL2,
&gpu_cfg2);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
msleep(100);
/* linkctl */
pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_HAWD,
bridge_cfg &
PCI_EXP_LNKCTL_HAWD);
pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_HAWD,
gpu_cfg &
PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
&tmp16);
tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN);
tmp16 |= (bridge_cfg2 &
(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN));
pcie_capability_write_word(root,
PCI_EXP_LNKCTL2,
tmp16);
pcie_capability_read_word(adev->pdev,
PCI_EXP_LNKCTL2,
&tmp16);
tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN);
tmp16 |= (gpu_cfg2 &
(PCI_EXP_LNKCTL2_ENTER_COMP |
PCI_EXP_LNKCTL2_TX_MARGIN));
pcie_capability_write_word(adev->pdev,
PCI_EXP_LNKCTL2,
tmp16);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
}
}
}
/* set the link speed */
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_FORCE_EN_SW_SPEED_CHANGE_MASK |
PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_HW_SPEED_CHANGE_MASK;
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
for (i = 0; i < adev->usec_timeout; i++) {
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
if ((speed_cntl & PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK) == 0)
break;
udelay(1);
}
}
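/**
 * cik_program_aspm - program pcie ASPM settings
 *
 * @adev: amdgpu_device pointer
 *
 * Configures PCIe L0s/L1 power states and related link power
 * savings. Skipped on APUs, root bus devices, and when ASPM is
 * disabled for the device.
 */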
static void cik_program_aspm(struct amdgpu_device *adev)
{
u32 data, orig;
bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
bool disable_clkreq = false;
if (!amdgpu_device_should_use_aspm(adev))
return;
if (pci_is_root_bus(adev->pdev->bus))
return;
/* XXX double check APUs */
if (adev->flags & AMD_IS_APU)
return;
orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |
PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
if (orig != data)
WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL3, data);
orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
if (orig != data)
WREG32_PCIE(ixPCIE_P_CNTL, data);
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK |
PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (!disable_l0s)
data |= (7 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT);
if (!disable_l1) {
data |= (7 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT);
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
if (!disable_plloff_in_l1) {
bool clk_req_support;
orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_0);
data &= ~(PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
data |= (7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
(7 << PB0_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
WREG32_PCIE(ixPB0_PIF_PWRDOWN_0, data);
orig = data = RREG32_PCIE(ixPB0_PIF_PWRDOWN_1);
data &= ~(PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
data |= (7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
(7 << PB0_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
WREG32_PCIE(ixPB0_PIF_PWRDOWN_1, data);
orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_0);
data &= ~(PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0_MASK |
PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0_MASK);
data |= (7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_OFF_0__SHIFT) |
(7 << PB1_PIF_PWRDOWN_0__PLL_POWER_STATE_IN_TXS2_0__SHIFT);
if (orig != data)
WREG32_PCIE(ixPB1_PIF_PWRDOWN_0, data);
orig = data = RREG32_PCIE(ixPB1_PIF_PWRDOWN_1);
data &= ~(PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1_MASK |
PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1_MASK);
data |= (7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_OFF_1__SHIFT) |
(7 << PB1_PIF_PWRDOWN_1__PLL_POWER_STATE_IN_TXS2_1__SHIFT);
if (orig != data)
WREG32_PCIE(ixPB1_PIF_PWRDOWN_1, data);
orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
data &= ~PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
data |= ~(3 << PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE__SHIFT);
if (orig != data)
WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
if (!disable_clkreq) {
struct pci_dev *root = adev->pdev->bus->self;
u32 lnkcap;
clk_req_support = false;
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
clk_req_support = true;
} else {
clk_req_support = false;
}
if (clk_req_support) {
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL2, data);
orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK |
THM_CLK_CNTL__TMON_CLK_SEL_MASK);
data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
(1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
if (orig != data)
WREG32_SMC(ixTHM_CLK_CNTL, data);
orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
MISC_CLK_CTRL__ZCLK_SEL_MASK);
data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
(1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
if (orig != data)
WREG32_SMC(ixMISC_CLK_CTRL, data);
orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
data &= ~CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK;
if (orig != data)
WREG32_SMC(ixCG_CLKPIN_CNTL, data);
orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
data &= ~CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK;
if (orig != data)
WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);
orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
if (orig != data)
WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
}
}
} else {
if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
if (orig != data)
WREG32_PCIE(ixPCIE_CNTL2, data);
if (!disable_l0s) {
data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
if ((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) ==
PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) {
data = RREG32_PCIE(ixPCIE_LC_STATUS1);
if ((data & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK) &&
(data & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK)) {
orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
if (orig != data)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}
}
}
}
static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
{
return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
}
}
static void cik_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32(mmHDP_DEBUG0, 1);
RREG32(mmHDP_DEBUG0);
} else {
amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
}
}
static bool cik_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we support soft reset */
return true;
}
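/**
 * cik_get_pcie_usage - sample the pcie perf counters
 *
 * @adev: amdgpu_device pointer
 * @count0: number of received messages sampled over ~1s
 * @count1: number of posted requests sent sampled over ~1s
 *
 * Not supported on APUs, where the counters report 0.
 */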
static void cik_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
uint32_t perfctr = 0;
uint64_t cnt0_of, cnt1_of;
int tmp;
/* This reports 0 on APUs, so return to avoid writing/reading registers
* that may or may not be different from their GPU counterparts
*/
if (adev->flags & AMD_IS_APU)
return;
/* Set the 2 events that we wish to watch, defined above */
/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
/* Write to enable desired perf counters */
WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
/* Zero out and enable the perf counters
* Write 0x5:
* Bit 0 = Start all counters(1)
* Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
msleep(1000);
/* Load the shadow and disable the perf counters
* Write 0x2:
* Bit 0 = Stop counters(0)
* Bit 1 = Load the shadow counters(1)
*/
WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
/* Read register values to get any >32bit overflow */
tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
/* Get the values and add the overflow */
*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
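/**
 * cik_need_reset_on_init - check if a reset is needed at init time
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if the SMC is already running, which indicates the
 * asic was left initialized and should be reset before use.
 */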
static bool cik_need_reset_on_init(struct amdgpu_device *adev)
{
u32 clock_cntl, pc;
if (adev->flags & AMD_IS_APU)
return false;
/* check if the SMC is already running */
clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
pc = RREG32_SMC(ixSMC_PC_C);
if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
(0x20100 <= pc))
return true;
return false;
}
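/**
 * cik_get_pcie_replay_count - return the total number of pcie replays
 *
 * @adev: amdgpu_device pointer
 *
 * Sums the NAKs received and NAKs generated, i.e. the number of
 * replays on the PCIe link.
 */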
static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
{
uint64_t nak_r, nak_g;
/* Get the number of NAKs received and generated */
nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
/* Add the total number of NAKs, i.e the number of replays */
return (nak_r + nak_g);
}
static void cik_pre_asic_init(struct amdgpu_device *adev)
{
}
static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
.reset_method = &cik_asic_reset_method,
.set_vga_state = &cik_vga_set_state,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_config_memsize = &cik_get_config_memsize,
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
.need_full_reset = &cik_need_full_reset,
.init_doorbell_index = &legacy_doorbell_index_init,
.get_pcie_usage = &cik_get_pcie_usage,
.need_reset_on_init = &cik_need_reset_on_init,
.get_pcie_replay_count = &cik_get_pcie_replay_count,
.supports_baco = &cik_asic_supports_baco,
.pre_asic_init = &cik_pre_asic_init,
.query_video_codecs = &cik_query_video_codecs,
};
static int cik_common_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->smc_rreg = &cik_smc_rreg;
adev->smc_wreg = &cik_smc_wreg;
adev->pcie_rreg = &cik_pcie_rreg;
adev->pcie_wreg = &cik_pcie_wreg;
adev->uvd_ctx_rreg = &cik_uvd_ctx_rreg;
adev->uvd_ctx_wreg = &cik_uvd_ctx_wreg;
adev->didt_rreg = &cik_didt_rreg;
adev->didt_wreg = &cik_didt_wreg;
adev->asic_funcs = &cik_asic_funcs;
adev->rev_id = cik_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_BONAIRE:
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_HAWAII:
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = 0x28;
break;
case CHIP_KAVERI:
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags =
/*AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG |*/
AMD_PG_SUPPORT_UVD |
AMD_PG_SUPPORT_VCE |
/* AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_ACP |
AMD_PG_SUPPORT_SAMU |*/
0;
if (adev->pdev->device == 0x1312 ||
adev->pdev->device == 0x1316 ||
adev->pdev->device == 0x1317)
adev->external_rev_id = 0x41;
else
adev->external_rev_id = 0x1;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags =
/*AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG | */
AMD_PG_SUPPORT_UVD |
/*AMD_PG_SUPPORT_VCE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_SAMU |*/
0;
if (adev->asic_type == CHIP_KABINI) {
if (adev->rev_id == 0)
adev->external_rev_id = 0x81;
else if (adev->rev_id == 1)
adev->external_rev_id = 0x82;
else if (adev->rev_id == 2)
adev->external_rev_id = 0x85;
} else
adev->external_rev_id = adev->rev_id + 0xa1;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
return 0;
}
static int cik_common_sw_init(void *handle)
{
return 0;
}
static int cik_common_sw_fini(void *handle)
{
return 0;
}
static int cik_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* move the golden regs per IP block */
cik_init_golden_registers(adev);
/* enable pcie gen2/3 link */
cik_pcie_gen3_enable(adev);
/* enable aspm */
cik_program_aspm(adev);
return 0;
}
static int cik_common_hw_fini(void *handle)
{
return 0;
}
static int cik_common_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return cik_common_hw_fini(adev);
}
static int cik_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return cik_common_hw_init(adev);
}
static bool cik_common_is_idle(void *handle)
{
return true;
}
static int cik_common_wait_for_idle(void *handle)
{
return 0;
}
static int cik_common_soft_reset(void *handle)
{
/* XXX hard reset?? */
return 0;
}
static int cik_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int cik_common_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
.sw_init = cik_common_sw_init,
.sw_fini = cik_common_sw_fini,
.hw_init = cik_common_hw_init,
.hw_fini = cik_common_hw_fini,
.suspend = cik_common_suspend,
.resume = cik_common_resume,
.is_idle = cik_common_is_idle,
.wait_for_idle = cik_common_wait_for_idle,
.soft_reset = cik_common_soft_reset,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};
static const struct amdgpu_ip_block_version cik_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &cik_common_ip_funcs,
};
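/**
 * cik_set_ip_blocks - register the IP blocks for a CIK asic
 *
 * @adev: amdgpu_device pointer
 *
 * Adds the common, GMC, IH, GFX, SDMA, SMU, display, UVD and VCE
 * IP blocks appropriate for the detected asic.
 * Returns 0 on success, -EINVAL for unsupported asics.
 */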
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_BONAIRE:
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_HAWAII:
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KAVERI:
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KABINI:
case CHIP_MULLINS:
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
return 0;
}
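/*
 * Reader's note (summary derived from the switch above, not original driver
 * text): every CIK variant adds its IP blocks in the same init order -
 * common, GMC, IH, GFX, SDMA, SMU/powerplay, display (virtual, DC or DCE),
 * then UVD and VCE - only the per-ASIC block versions differ.
 */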
| linux-master | drivers/gpu/drm/amd/amdgpu/cik.c |
/*
* Copyright 2013 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
SDMA0_REGISTER_OFFSET,
SDMA1_REGISTER_OFFSET
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);
MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
* DMA engines. These engines are used for compute
* and gfx. There are two DMA engines (SDMA0, SDMA1)
* and each one supports 1 ring buffer used for gfx
* and 2 queues used for compute.
*
* The programming model is very similar to the CP
* (ring buffer, IBs, etc.), but sDMA has its own
* packet format that is different from the PM4 format
* used by the CP. sDMA supports copying data, writing
* embedded data, solid fills, and a number of other
* things. It also has support for tiling/detiling of
* buffers.
*/
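/*
 * Illustrative sketch only (inferred from the packet builders below, not
 * part of the original comments): every sDMA packet in this file starts
 * with an SDMA_PACKET(opcode, sub_opcode, extra_bits) header DW followed
 * by payload DWs. For example, the 5-DW linear write used by the ring
 * test is laid out roughly as:
 *
 *   DW0: SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 *   DW1: lower_32_bits(dst_gpu_addr)
 *   DW2: upper_32_bits(dst_gpu_addr)
 *   DW3: number of data DWs that follow (1)
 *   DW4: data value
 */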
/**
* cik_sdma_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
int err = 0, i;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_BONAIRE:
chip_name = "bonaire";
break;
case CHIP_HAWAII:
chip_name = "hawaii";
break;
case CHIP_KAVERI:
chip_name = "kaveri";
break;
case CHIP_KABINI:
chip_name = "kabini";
break;
case CHIP_MULLINS:
chip_name = "mullins";
break;
default: BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
}
out:
if (err) {
pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
/**
* cik_sdma_ring_get_rptr - get the current read pointer
*
* @ring: amdgpu ring pointer
*
* Get the current rptr from the hardware (CIK+).
*/
static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
u32 rptr;
rptr = *ring->rptr_cpu_addr;
return (rptr & 0x3fffc) >> 2;
}
/**
* cik_sdma_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware (CIK+).
*/
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
}
/**
* cik_sdma_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware (CIK+).
*/
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
(ring->wptr << 2) & 0x3fffc);
}
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_NOP_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
* cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
* @job: job to retrieve vmid from
* @ib: IB object to schedule
* @flags: unused
*
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 extra_bits = vmid & 0xf;
/* IB packet must end on an 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, ib->length_dw);
}
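/*
 * Alignment sketch (derived from the code above, not original driver text):
 * the INDIRECT_BUFFER packet is 4 DWs, so the ring is padded with
 * (4 - wptr) & 7 NOPs to make the packet end on an 8-DW boundary.
 * E.g. wptr % 8 == 6 -> 6 NOPs -> the packet occupies DWs 4..7 of the
 * next 8-DW group.
 */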
/**
* cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
u32 ref_and_mask;
if (ring->me == 0)
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
else
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
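/*
 * Reader's note (sketch, not from the original source): POLL_REG_MEM with
 * EXTRA_FUNC(3) makes the engine poll GPU_HDP_FLUSH_DONE until
 * (value & mask) == reference; here both reference and mask are the
 * per-engine SDMAn bit, and the final DW encodes a 0xfff retry count
 * with a poll interval of 10.
 */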
/**
* cik_sdma_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed (CIK).
*/
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
/* write the fence */
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
/* optionally write high bits as well */
if (write64bit) {
addr += 4;
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}
/**
* cik_sdma_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the gfx async dma ring buffers (CIK).
*/
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
u32 rb_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
}
/**
* cik_sdma_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the compute async dma queues (CIK).
*/
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
/* XXX todo */
}
/**
* cik_ctx_switch_enable - enable/disable the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
*
* Halt or unhalt the async dma engines context switch (CIK).
*/
static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl, phase_quantum = 0;
int i;
if (amdgpu_sdma_phase_quantum) {
unsigned value = amdgpu_sdma_phase_quantum;
unsigned unit = 0;
while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
value = (value + 1) >> 1;
unit++;
}
if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
WARN_ONCE(1,
"clamping sdma_phase_quantum to %uK clock cycles\n",
value << unit);
}
phase_quantum =
value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
if (enable) {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 1);
if (amdgpu_sdma_phase_quantum) {
WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
phase_quantum);
WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
phase_quantum);
}
} else {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 0);
}
WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
}
}
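/*
 * Quantum encoding sketch (assumption drawn from the loop above): the
 * requested amdgpu_sdma_phase_quantum is stored as a mantissa/exponent
 * pair - the value is halved (rounding up) and the unit incremented until
 * the value fits the VALUE field - so the programmed quantum is roughly
 * value << unit in the register's native units (the clamp warning
 * suggests these are K clock cycles).
 */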
/**
* cik_sdma_enable - halt or unhalt the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
*
* Halt or unhalt the async dma engines (CIK).
*/
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
u32 me_cntl;
int i;
if (!enable) {
cik_sdma_gfx_stop(adev);
cik_sdma_rlc_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
if (enable)
me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
else
me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
}
}
/**
* cik_sdma_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the gfx DMA ring buffers and enable them (CIK).
* Returns 0 for success, error for failure.
*/
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
cik_srbm_select(adev, 0, 0, 0, j);
/* SDMA GFX */
WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
/* XXX SDMA RLC - todo */
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
((ring->rptr_gpu_addr) & 0xFFFFFFFC));
rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
/* enable DMA RB */
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);
ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
}
/**
* cik_sdma_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the compute DMA queues and enable them (CIK).
* Returns 0 for success, error for failure.
*/
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
/* XXX todo */
return 0;
}
/**
* cik_sdma_load_microcode - load the sDMA ME ucode
*
* @adev: amdgpu_device pointer
*
* Loads the sDMA0/1 ucode.
* Returns 0 for success, -EINVAL if the ucode is not available.
*/
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
int i, j;
/* halt the MEs */
cik_sdma_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
if (!adev->sdma.instance[i].fw)
return -EINVAL;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true;
fw_data = (const __le32 *)
(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
for (j = 0; j < fw_size; j++)
WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
}
return 0;
}
/**
* cik_sdma_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the DMA engines and enable them (CIK).
* Returns 0 for success, error for failure.
*/
static int cik_sdma_start(struct amdgpu_device *adev)
{
int r;
r = cik_sdma_load_microcode(adev);
if (r)
return r;
/* halt the engine before programming */
cik_sdma_enable(adev, false);
/* enable sdma ring preemption */
cik_ctx_switch_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
if (r)
return r;
r = cik_sdma_rlc_resume(adev);
if (r)
return r;
return 0;
}
/**
* cik_sdma_ring_test_ring - simple async dma engine test
*
* @ring: amdgpu_ring structure holding ring information
*
* Test the DMA engine by using it to write a value
* to memory (CIK).
* Returns 0 for success, error for failure.
*/
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
unsigned i;
unsigned index;
int r;
u32 tmp;
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
if (r)
goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, 1); /* number of DWs to follow */
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
error_free_wb:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* cik_sdma_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (CIK).
* Returns 0 on success, error on failure.
*/
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
long r;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr to copy from
* @count: number of page entries to update
*
* Update PTEs by copying them from the GART using sDMA (CIK).
*/
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = bytes;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
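/*
 * Note (derived from the code above): each page-table entry is 8 bytes,
 * so this single 7-DW COPY/LINEAR packet moves count * 8 bytes of PTEs
 * from the GART source @src into the page table at @pe.
 */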
/**
* cik_sdma_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr)
{
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
}
}
/**
* cik_sdma_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using sDMA (CIK).
*/
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
* cik_sdma_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*
*/
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
SDMA_NOP_COUNT(pad_count - 1);
else
ib->ptr[ib->length_dw++] =
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
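/*
 * Worked example (illustrative, not original driver text): an IB of
 * 13 DWs gets pad_count = (-13) & 7 = 3, bringing it to 16 DWs, a
 * multiple of 8. With burst_nop the first padding NOP carries
 * SDMA_NOP_COUNT(2), letting the engine consume the remaining padding
 * as one burst.
 */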
/**
* cik_sdma_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (CIK).
*/
static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
SDMA_POLL_REG_MEM_EXTRA_OP(0) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
SDMA_POLL_REG_MEM_EXTRA_M));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
/**
* cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
* @vmid: vmid number to use
* @pd_addr: address
*
* Update the page table base and flush the VM TLB
* using sDMA (CIK).
*/
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); /* reference */
amdgpu_ring_write(ring, 0); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, val);
}
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
} else {
orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
data |= 0xff000000;
if (data != orig)
WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
data |= 0xff000000;
if (data != orig)
WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
}
}
static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
data |= 0x100;
if (orig != data)
WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
data |= 0x100;
if (orig != data)
WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
} else {
orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
data &= ~0x100;
if (orig != data)
WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
data &= ~0x100;
if (orig != data)
WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
}
}
static int cik_sdma_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
cik_sdma_set_ring_funcs(adev);
cik_sdma_set_irq_funcs(adev);
cik_sdma_set_buffer_funcs(adev);
cik_sdma_set_vm_pte_funcs(adev);
return 0;
}
static int cik_sdma_sw_init(void *handle)
{
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r, i;
r = cik_sdma_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
}
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
return r;
}
static int cik_sdma_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
cik_sdma_free_microcode(adev);
return 0;
}
static int cik_sdma_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = cik_sdma_start(adev);
if (r)
return r;
return r;
}
static int cik_sdma_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cik_ctx_switch_enable(adev, false);
cik_sdma_enable(adev, false);
return 0;
}
static int cik_sdma_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return cik_sdma_hw_fini(adev);
}
static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cik_sdma_soft_reset(handle);
return cik_sdma_hw_init(adev);
}
static bool cik_sdma_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK))
return false;
return true;
}
static int cik_sdma_wait_for_idle(void *handle)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK);
if (!tmp)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp;
/* sdma0 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
tmp |= SDMA0_F32_CNTL__HALT_MASK;
WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
/* sdma1 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
tmp |= SDMA0_F32_CNTL__HALT_MASK;
WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
default:
break;
}
return 0;
}
static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id, queue_id;
instance_id = (entry->ring_id & 0x3) >> 0;
queue_id = (entry->ring_id & 0xc) >> 2;
DRM_DEBUG("IH: SDMA trap\n");
switch (instance_id) {
case 0:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[0].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
case 1:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[1].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
}
return 0;
}
static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id;
DRM_ERROR("Illegal instruction in SDMA command stream\n");
instance_id = (entry->ring_id & 0x3) >> 0;
drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
static int cik_sdma_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (state == AMD_CG_STATE_GATE)
gate = true;
cik_enable_sdma_mgcg(adev, gate);
cik_enable_sdma_mgls(adev, gate);
return 0;
}
static int cik_sdma_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
.sw_init = cik_sdma_sw_init,
.sw_fini = cik_sdma_sw_fini,
.hw_init = cik_sdma_hw_init,
.hw_fini = cik_sdma_hw_fini,
.suspend = cik_sdma_suspend,
.resume = cik_sdma_resume,
.is_idle = cik_sdma_is_idle,
.wait_for_idle = cik_sdma_wait_for_idle,
.soft_reset = cik_sdma_soft_reset,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
};
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
.support_64bit_ptrs = false,
.get_rptr = cik_sdma_ring_get_rptr,
.get_wptr = cik_sdma_ring_get_wptr,
.set_wptr = cik_sdma_ring_set_wptr,
.emit_frame_size =
6 + /* cik_sdma_ring_emit_hdp_flush */
3 + /* hdp invalidate */
6 + /* cik_sdma_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
.emit_wreg = cik_sdma_ring_emit_wreg,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
adev->sdma.instance[i].ring.me = i;
}
}
static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
.set = cik_sdma_set_trap_irq_state,
.process = cik_sdma_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
.process = cik_sdma_process_illegal_inst_irq,
};
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}
/**
* cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
* @tmz: is this a secure operation
*
* Copy GPU buffers using the DMA engine (CIK).
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = byte_count;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
* cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
*
* @ib: indirect buffer to fill
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
* Fill GPU buffers using the DMA engine (CIK).
*/
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
.copy_max_bytes = 0x1fffff,
.copy_num_dw = 7,
.emit_copy_buffer = cik_sdma_emit_copy_buffer,
.fill_max_bytes = 0x1fffff,
.fill_num_dw = 5,
.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = cik_sdma_vm_copy_pte,
.write_pte = cik_sdma_vm_write_pte,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->vm_manager.vm_pte_scheds[i] =
&adev->sdma.instance[i].ring.sched;
}
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 2,
.minor = 0,
.rev = 0,
.funcs = &cik_sdma_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/cik_sdma.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"
#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define smnPCIE_LC_CNTL 0x11140280
#define smnPCIE_LC_CNTL3 0x111402d4
#define smnPCIE_LC_CNTL6 0x111402ec
#define smnPCIE_LC_CNTL7 0x111402f0
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define smnRCC_BIF_STRAP3 0x1012348c
#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
#define smnRCC_BIF_STRAP5 0x10123494
#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
#define smnRCC_BIF_STRAP2 0x10123488
#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
/*
* These are nbio v7_4_1 register masks. Temporarily define them here since
* the nbio v7_4_1 header is incomplete.
*/
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT 0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT 0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE 0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE_BASE_IDX 2
//BIF_MMSCH1_DOORBELL_RANGE_ALDE
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET__SHIFT 0x2
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE__SHIFT 0x10
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET_MASK 0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE_MASK 0x001F0000L
#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
#define mmBIF_DOORBELL_INT_CNTL_ALDE 0x00fe
#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX 2
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT 0x18
#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK 0x01000000L
#define mmBIF_INTR_CNTL_ALDE 0x0101
#define mmBIF_INTR_CNTL_ALDE_BASE_IDX 2
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
if (adev->asic_type == CHIP_ALDEBARAN)
tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_ALDE);
else
tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
return tmp;
}
static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
else
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index, int doorbell_size)
{
u32 reg, doorbell_range;
if (instance < 2) {
reg = instance +
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
} else {
/*
* The register addresses of SDMA2~7 are not consecutive
* with SDMA0~1; a 4-dword offset must be added.
*
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
* BIF_SDMA4_DOORBELL_RANGE:
*     ARCTURUS:  0x3be0
*     ALDEBARAN: 0x3be4
*/
if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
reg = instance + 0x4 + 0x1 +
SOC15_REG_OFFSET(NBIO, 0,
mmBIF_SDMA0_DOORBELL_RANGE);
else
reg = instance + 0x4 +
SOC15_REG_OFFSET(NBIO, 0,
mmBIF_SDMA0_DOORBELL_RANGE);
}
doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
WREG32(reg, doorbell_range);
}
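/*
 * Offset sketch (derived from the table in the comment above): for
 * instance >= 2 the register index is instance + 4 relative to
 * BIF_SDMA0_DOORBELL_RANGE, e.g. SDMA2 -> +6 dwords (0x3bd8 vs 0x3bc0),
 * with Aldebaran's SDMA4 needing one extra dword (0x3be4 instead of
 * Arcturus's 0x3be0).
 */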
static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
int doorbell_index, int instance)
{
u32 reg;
u32 doorbell_range;
if (instance) {
if (adev->asic_type == CHIP_ALDEBARAN)
reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE_ALDE);
else
reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
} else
reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
} else
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);
WREG32(reg, doorbell_range);
}
static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = 0;
if (enable) {
tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
lower_32_bits(adev->doorbell.base));
WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
upper_32_bits(adev->doorbell.base));
}
WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 8);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
//TODO: Add support for v7.4
}
static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_CNTL2);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
} else {
data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
}
if (def != data)
WREG32_PCIE(smnPCIE_CNTL2, data);
}
static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
int data;
/* AMD_CG_SUPPORT_BIF_MGCG */
data = RREG32_PCIE(smnCPM_CONTROL);
if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_BIF_MGCG;
/* AMD_CG_SUPPORT_BIF_LS */
data = RREG32_PCIE(smnPCIE_CNTL2);
if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
/* setup interrupt control */
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}
static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}
static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}
static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
uint32_t baco_cntl;
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4) &&
!amdgpu_sriov_vf(adev)) {
baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL);
if (baco_cntl &
(BACO_CNTL__BACO_DUMMY_EN_MASK | BACO_CNTL__BACO_EN_MASK)) {
baco_cntl &= ~(BACO_CNTL__BACO_DUMMY_EN_MASK |
BACO_CNTL__BACO_EN_MASK);
dev_dbg(adev->dev, "Unsetting baco dummy mode %x",
baco_cntl);
WREG32_SOC15(NBIO, 0, mmBACO_CNTL, baco_cntl);
}
}
}
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
struct ras_err_data err_data = {0, 0, 0, NULL};
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (adev->asic_type == CHIP_ALDEBARAN)
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
else
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
/* driver has to clear the interrupt status when bif ring is disabled */
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL,
RAS_CNTLR_INTERRUPT_CLEAR, 1);
if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
else
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
if (!ras->disable_ras_err_cnt_harvest) {
/*
* clear error status after ras_controller_intr
* according to hw team and count ue number
* for query
*/
nbio_v7_4_query_ras_error_count(adev, &err_data);
/* logging on error cnt and printing for awareness */
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
"errors detected in %s block, "
"no user action is needed.\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
if (err_data.ue_count)
dev_info(adev->dev, "%ld uncorrectable hardware "
"errors detected in %s block\n",
obj->err_data.ue_count,
get_ras_block_str(adev->nbio.ras_if));
}
dev_info(adev->dev, "RAS controller interrupt triggered "
"by NBIF error\n");
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
amdgpu_ras_reset_gpu(adev);
}
}
static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
uint32_t bif_doorbell_intr_cntl;
if (adev->asic_type == CHIP_ALDEBARAN)
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
else
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
/* driver has to clear the interrupt status when bif ring is disabled */
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
BIF_DOORBELL_INT_CNTL,
RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
else
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
amdgpu_ras_global_ras_isr(adev);
}
}
static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
/* RAS controller interrupt enablement is handled by the PSP bootloader
* when it enables the RAS feature. The driver only needs to program the
* correct interrupt vector for the bare-metal and SR-IOV cases
* respectively.
*/
uint32_t bif_intr_cntl;
if (adev->asic_type == CHIP_ALDEBARAN)
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
else
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
* vector 1 for the bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
else
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
}
return 0;
}
static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
/* By design, the IH cookie for ras_controller_irq should be written
* to the BIF ring instead of the general IV ring. However, due to a known
* BIF ring hw bug, it has to be disabled. There is no chance the process
* function will be invoked, so leave it as a dummy one.
*/
return 0;
}
static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
/* As with the RAS controller interrupt, enablement is handled by the PSP
* bootloader when it enables the RAS feature. The driver only needs to
* program the correct interrupt vector for the bare-metal and SR-IOV
* cases respectively.
*/
uint32_t bif_intr_cntl;
if (adev->asic_type == CHIP_ALDEBARAN)
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
else
bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
if (state == AMDGPU_IRQ_STATE_ENABLE) {
/* set interrupt vector select bit to 0 to select
* vector 1 for the bare metal case */
bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
BIF_INTR_CNTL,
RAS_INTR_VEC_SEL, 0);
if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
else
WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
}
return 0;
}
static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
/* By design, the IH cookie for err_event_athub_irq should be written
* to the BIF ring instead of the general IV ring. However, due to a known
* BIF ring hw bug, it has to be disabled. There is no chance the process
* function will be invoked, so leave it as a dummy one.
*/
return 0;
}
static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
.set = nbio_v7_4_set_ras_controller_irq_state,
.process = nbio_v7_4_process_ras_controller_irq,
};
static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
.process = nbio_v7_4_process_err_event_athub_irq,
};
static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
{
int r;
/* init the irq funcs */
adev->nbio.ras_controller_irq.funcs =
&nbio_v7_4_ras_controller_irq_funcs;
adev->nbio.ras_controller_irq.num_types = 1;
/* register ras controller interrupt */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
&adev->nbio.ras_controller_irq);
return r;
}
static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
int r;
/* init the irq funcs */
adev->nbio.ras_err_event_athub_irq.funcs =
&nbio_v7_4_ras_err_event_athub_irq_funcs;
adev->nbio.ras_err_event_athub_irq.num_types = 1;
/* register ras err event athub interrupt */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
&adev->nbio.ras_err_event_athub_irq);
return r;
}
#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE 0x13b20030
#define smnRAS_GLOBAL_STATUS_LO_ALDE 0x13b20020
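/**
 * nbio_v7_4_query_ras_error_count - query NBIO parity error counts
 *
 * @adev: amdgpu device pointer
 * @ras_error_status: pointer to a ras_err_data structure to update
 *
 * Reads the RAS global status register, bumps the correctable and
 * uncorrectable error counters accordingly, and clears the global status,
 * parity and central interrupt status registers when an error was latched.
 */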
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
uint32_t global_sts, central_sts, int_eoi, parity_sts;
uint32_t corr, fatal, non_fatal;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
if (adev->asic_type == CHIP_ALDEBARAN)
global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE);
else
global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
ParityErrNonFatal);
if (adev->asic_type == CHIP_ALDEBARAN)
parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE);
else
parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
if (corr)
err_data->ce_count++;
if (fatal)
err_data->ue_count++;
if (corr || fatal || non_fatal) {
central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
/* clear error status register */
if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts);
else
WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
if (fatal) {
/* clear parity fatal error indication field */
if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts);
else
WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts);
}
if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
BIFL_RasContller_Intr_Recv)) {
/* clear interrupt status register */
WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
int_eoi = REG_SET_FIELD(int_eoi,
IOHC_INTERRUPT_EOI, SMI_EOI, 1);
WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
}
}
}
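/**
 * nbio_v7_4_enable_doorbell_interrupt - enable/disable the doorbell interrupt
 *
 * @adev: amdgpu device pointer
 * @enable: true to enable the doorbell interrupt, false to disable it
 *
 * Programs the DOORBELL_INTERRUPT_DISABLE field of the doorbell interrupt
 * control register (Aldebaran uses a dedicated register instance).
 */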
static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
bool enable)
{
if (adev->asic_type == CHIP_ALDEBARAN)
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
else
WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
const struct amdgpu_ras_block_hw_ops nbio_v7_4_ras_hw_ops = {
.query_ras_error_count = nbio_v7_4_query_ras_error_count,
};
struct amdgpu_nbio_ras nbio_v7_4_ras = {
.ras_block = {
.ras_comm = {
.name = "pcie_bif",
.block = AMDGPU_RAS_BLOCK__PCIE_BIF,
.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
},
.hw_ops = &nbio_v7_4_ras_hw_ops,
.ras_late_init = amdgpu_nbio_ras_late_init,
},
.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
};
#ifdef CONFIG_PCIEASPM
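/**
 * nbio_v7_4_program_ltr - program PCIe LTR (Latency Tolerance Reporting)
 *
 * @adev: amdgpu device pointer
 *
 * Programs the endpoint LTR control register and strap, and sets the LTR
 * enable bit in the PCIe device control 2 register.
 */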
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
if (def != data)
WREG32_PCIE(smnRCC_BIF_STRAP2, data);
def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
if (def != data)
WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
#endif
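/**
 * nbio_v7_4_program_aspm - program PCIe ASPM settings
 *
 * @adev: amdgpu device pointer
 *
 * Programs the link control, clock gating and strap registers that govern
 * ASPM L0s/L1 entry. Skipped on NBIO 7.4.4 and when the kernel is built
 * without CONFIG_PCIEASPM.
 */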
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
uint32_t def, data;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
return;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL7, data);
def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
if (def != data)
WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
if (def != data)
WREG32_PCIE(smnRCC_BIF_STRAP3, data);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
if (def != data)
WREG32_PCIE(smnRCC_BIF_STRAP5, data);
def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
/* Don't bother about LTR if LTR is not enabled
* in the path */
if (adev->pdev->ltr_path)
nbio_v7_4_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
if (def != data)
WREG32_PCIE(smnRCC_BIF_STRAP3, data);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
if (def != data)
WREG32_PCIE(smnRCC_BIF_STRAP5, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
#endif
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
.get_rev_id = nbio_v7_4_get_rev_id,
.mc_access_enable = nbio_v7_4_mc_access_enable,
.get_memsize = nbio_v7_4_get_memsize,
.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
.program_aspm = nbio_v7_4_program_aspm,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/backlight.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <acpi/video.h>
#include <acpi/actbl.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_display.h"
#include "amd_acpi.h"
#include "atom.h"
/* Declare GUID for AMD _DSM method for XCCs */
static const guid_t amd_xcc_dsm_guid = GUID_INIT(0x8267f5d5, 0xa556, 0x44f2,
0xb8, 0xb4, 0x45, 0x56, 0x2e,
0x8c, 0x5b, 0xec);
#define AMD_XCC_HID_START 3000
#define AMD_XCC_DSM_GET_NUM_FUNCS 0
#define AMD_XCC_DSM_GET_SUPP_MODE 1
#define AMD_XCC_DSM_GET_XCP_MODE 2
#define AMD_XCC_DSM_GET_VF_XCC_MAPPING 4
#define AMD_XCC_DSM_GET_TMR_INFO 5
#define AMD_XCC_DSM_NUM_FUNCS 5
#define AMD_XCC_MAX_HID 24
struct xarray numa_info_xa;
/* Encapsulates the XCD acpi object information */
struct amdgpu_acpi_xcc_info {
struct list_head list;
struct amdgpu_numa_info *numa_info;
uint8_t xcp_node;
uint8_t phy_id;
acpi_handle handle;
};
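/* Encapsulates the per-device (BDF) partition and TMR information gathered
 * from the XCC _DSM functions
 */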
struct amdgpu_acpi_dev_info {
struct list_head list;
struct list_head xcc_list;
uint16_t bdf;
uint16_t supp_xcp_mode;
uint16_t xcp_mode;
uint16_t mem_mode;
uint64_t tmr_base;
uint64_t tmr_size;
};
struct list_head amdgpu_acpi_dev_list;
struct amdgpu_atif_notification_cfg {
bool enabled;
int command_code;
};
struct amdgpu_atif_notifications {
bool thermal_state;
bool forced_power_state;
bool system_power_state;
bool brightness_change;
bool dgpu_display_event;
bool gpu_package_power_limit;
};
struct amdgpu_atif_functions {
bool system_params;
bool sbios_requests;
bool temperature_change;
bool query_backlight_transfer_characteristics;
bool ready_to_undock;
bool external_gpu_information;
};
struct amdgpu_atif {
acpi_handle handle;
struct amdgpu_atif_notifications notifications;
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
struct backlight_device *bd;
struct amdgpu_dm_backlight_caps backlight_caps;
};
struct amdgpu_atcs_functions {
bool get_ext_state;
bool pcie_perf_req;
bool pcie_dev_rdy;
bool pcie_bus_width;
bool power_shift_control;
};
struct amdgpu_atcs {
acpi_handle handle;
struct amdgpu_atcs_functions functions;
};
static struct amdgpu_acpi_priv {
struct amdgpu_atif atif;
struct amdgpu_atcs atcs;
} amdgpu_acpi_priv;
/* Call the ATIF method
*/
/**
* amdgpu_atif_call - call an ATIF method
*
* @atif: atif structure
* @function: the ATIF function to execute
* @params: ATIF function params
*
* Executes the requested ATIF function (all asics).
* Returns a pointer to the acpi output buffer.
*/
static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
int function,
struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
atif_arg.count = 2;
atif_arg.pointer = &atif_arg_elements[0];
atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
atif_arg_elements[0].integer.value = function;
if (params) {
atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
atif_arg_elements[1].buffer.length = params->length;
atif_arg_elements[1].buffer.pointer = params->pointer;
} else {
/* We need a second fake parameter */
atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
atif_arg_elements[1].integer.value = 0;
}
status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
&buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
}
return buffer.pointer;
}
/**
* amdgpu_atif_parse_notification - parse supported notifications
*
* @n: supported notifications struct
* @mask: supported notifications mask from ATIF
*
* Use the supported notifications mask from ATIF function
* ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
* are supported (all asics).
*/
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
}
/**
* amdgpu_atif_parse_functions - parse supported functions
*
* @f: supported functions struct
* @mask: supported functions mask from ATIF
*
* Use the supported functions mask from ATIF function
* ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
* are supported (all asics).
*/
static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mask)
{
f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
f->query_backlight_transfer_characteristics =
mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
}
/**
* amdgpu_atif_verify_interface - verify ATIF
*
* @atif: amdgpu atif struct
*
* Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
* to initialize ATIF and determine what features are supported
* (all asics).
* returns 0 on success, error on failure.
*/
static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct atif_verify_interface output;
size_t size;
int err = 0;
info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
memset(&output, 0, sizeof(output));
size = *(u16 *) info->buffer.pointer;
if (size < 12) {
DRM_INFO("ATIF buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
size = min(sizeof(output), size);
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
amdgpu_atif_parse_notification(&atif->notifications, output.notification_mask);
amdgpu_atif_parse_functions(&atif->functions, output.function_bits);
out:
kfree(info);
return err;
}
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
* @atif: acpi handle
*
* Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
* to determine if a notifier is used and if so which one
* (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
* where n is specified in the result if a notifier is used.
* Returns 0 on success, error on failure.
*/
static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
struct atif_system_params params;
size_t size;
int err = 0;
info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
NULL);
if (!info) {
err = -EIO;
goto out;
}
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
err = -EINVAL;
goto out;
}
memset(&params, 0, sizeof(params));
size = min(sizeof(params), size);
memcpy(&params, info->buffer.pointer, size);
DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
params.valid_mask, params.flags);
params.flags = params.flags & params.valid_mask;
if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
n->enabled = false;
n->command_code = 0;
} else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
n->enabled = true;
n->command_code = 0x81;
} else {
if (size < 11) {
err = -EINVAL;
goto out;
}
n->enabled = true;
n->command_code = params.command_code;
}
out:
DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
(n->enabled ? "enabled" : "disabled"),
n->command_code);
kfree(info);
return err;
}
/**
* amdgpu_atif_query_backlight_caps - get min and max backlight input signal
*
* @atif: acpi handle
*
* Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
* to determine the acceptable range of backlight values
*
* Backlight_caps.caps_valid will be set to true if the query is successful
*
* The input signals are in range 0-255
*
* This function assumes the display with backlight is the first LCD
*
* Returns 0 on success, error on failure.
*/
static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct atif_qbtc_output characteristics;
struct atif_qbtc_arguments arguments;
struct acpi_buffer params;
size_t size;
int err = 0;
arguments.size = sizeof(arguments);
arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
params.length = sizeof(arguments);
params.pointer = (void *)&arguments;
info = amdgpu_atif_call(atif,
ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
&params);
if (!info) {
err = -EIO;
goto out;
}
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
err = -EINVAL;
goto out;
}
memset(&characteristics, 0, sizeof(characteristics));
size = min(sizeof(characteristics), size);
memcpy(&characteristics, info->buffer.pointer, size);
atif->backlight_caps.caps_valid = true;
atif->backlight_caps.min_input_signal =
characteristics.min_input_signal;
atif->backlight_caps.max_input_signal =
characteristics.max_input_signal;
out:
kfree(info);
return err;
}
/**
* amdgpu_atif_get_sbios_requests - get requested sbios event
*
* @atif: acpi handle
* @req: atif sbios request struct
*
* Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
* to determine what requests the sbios is making to the driver
* (all asics).
* Returns 0 on success, error on failure.
*/
static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
struct atif_sbios_requests *req)
{
union acpi_object *info;
size_t size;
int count = 0;
info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
NULL);
if (!info)
return -EIO;
size = *(u16 *)info->buffer.pointer;
if (size < 0xd) {
count = -EINVAL;
goto out;
}
memset(req, 0, sizeof(*req));
size = min(sizeof(*req), size);
memcpy(req, info->buffer.pointer, size);
DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
count = hweight32(req->pending);
out:
kfree(info);
return count;
}
/**
* amdgpu_atif_handler - handle ATIF notify requests
*
* @adev: amdgpu_device pointer
* @event: atif sbios request struct
*
* Checks the acpi event and if it matches an atif event,
* handles it.
*
* Returns:
* NOTIFY_BAD or NOTIFY_DONE, depending on the event.
*/
static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
event->device_class, event->type);
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
/* Is this actually our event? */
if (!atif->notification_cfg.enabled ||
event->type != atif->notification_cfg.command_code) {
/* These events will generate keypresses otherwise */
if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
return NOTIFY_BAD;
else
return NOTIFY_DONE;
}
if (atif->functions.sbios_requests) {
struct atif_sbios_requests req;
/* Check pending SBIOS requests */
count = amdgpu_atif_get_sbios_requests(atif, &req);
if (count <= 0)
return NOTIFY_BAD;
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
if (atif->bd) {
DRM_DEBUG_DRIVER("Changing brightness to %d\n",
req.backlight_level);
/*
* XXX backlight_device_set_brightness() is
* hardwired to post BACKLIGHT_UPDATE_SYSFS.
* It probably should accept 'reason' parameter.
*/
backlight_device_set_brightness(atif->bd, req.backlight_level);
}
}
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev_to_drm(adev));
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
/* TODO: check other events */
}
/* We've handled the event, stop the notifier chain. The ACPI interface
* overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
* userspace if the event was generated only to signal a SBIOS
* request.
*/
return NOTIFY_BAD;
}
/* Call the ATCS method
*/
/**
* amdgpu_atcs_call - call an ATCS method
*
* @atcs: atcs structure
* @function: the ATCS function to execute
* @params: ATCS function params
*
* Executes the requested ATCS function (all asics).
* Returns a pointer to the acpi output buffer.
*/
static union acpi_object *amdgpu_atcs_call(struct amdgpu_atcs *atcs,
int function,
struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atcs_arg_elements[2];
struct acpi_object_list atcs_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
atcs_arg.count = 2;
atcs_arg.pointer = &atcs_arg_elements[0];
atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
atcs_arg_elements[0].integer.value = function;
if (params) {
atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
atcs_arg_elements[1].buffer.length = params->length;
atcs_arg_elements[1].buffer.pointer = params->pointer;
} else {
/* We need a second fake parameter */
atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
atcs_arg_elements[1].integer.value = 0;
}
status = acpi_evaluate_object(atcs->handle, NULL, &atcs_arg, &buffer);
/* Fail only if calling the method fails and ATCS is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
}
return buffer.pointer;
}
/**
* amdgpu_atcs_parse_functions - parse supported functions
*
* @f: supported functions struct
* @mask: supported functions mask from ATCS
*
* Use the supported functions mask from ATCS function
* ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
* are supported (all asics).
*/
static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mask)
{
f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
f->power_shift_control = mask & ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED;
}
/**
* amdgpu_atcs_verify_interface - verify ATCS
*
* @atcs: amdgpu atcs struct
*
* Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
* to initialize ATCS and determine what features are supported
* (all asics).
* returns 0 on success, error on failure.
*/
static int amdgpu_atcs_verify_interface(struct amdgpu_atcs *atcs)
{
union acpi_object *info;
struct atcs_verify_interface output;
size_t size;
int err = 0;
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
memset(&output, 0, sizeof(output));
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
DRM_INFO("ATCS buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
size = min(sizeof(output), size);
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
amdgpu_atcs_parse_functions(&atcs->functions, output.function_bits);
out:
kfree(info);
return err;
}
/**
* amdgpu_acpi_is_pcie_performance_request_supported
*
* @adev: amdgpu_device pointer
*
* Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
* are supported (all asics).
* returns true if supported, false if not.
*/
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
{
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
return true;
return false;
}
/**
* amdgpu_acpi_is_power_shift_control_supported
*
* Check if the ATCS power shift control method
* is supported.
* returns true if supported, false if not.
*/
bool amdgpu_acpi_is_power_shift_control_supported(void)
{
return amdgpu_acpi_priv.atcs.functions.power_shift_control;
}
/**
* amdgpu_acpi_pcie_notify_device_ready
*
* @adev: amdgpu_device pointer
*
* Executes the PCIE_DEVICE_READY_NOTIFICATION method
* (all asics).
* returns 0 on success, error on failure.
*/
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
{
union acpi_object *info;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (!atcs->functions.pcie_dev_rdy)
return -EINVAL;
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
if (!info)
return -EIO;
kfree(info);
return 0;
}
/**
* amdgpu_acpi_pcie_performance_request
*
* @adev: amdgpu_device pointer
* @perf_req: requested perf level (pcie gen speed)
* @advertise: set advertise caps flag if set
*
* Executes the PCIE_PERFORMANCE_REQUEST method to
* change the pcie gen speed (all asics).
* returns 0 on success, error on failure.
*/
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise)
{
union acpi_object *info;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct atcs_pref_req_input atcs_input;
struct atcs_pref_req_output atcs_output;
struct acpi_buffer params;
size_t size;
u32 retry = 3;
if (amdgpu_acpi_pcie_notify_device_ready(adev))
return -EINVAL;
if (!atcs->functions.pcie_perf_req)
return -EINVAL;
atcs_input.size = sizeof(struct atcs_pref_req_input);
/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
atcs_input.client_id = pci_dev_id(adev->pdev);
atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
if (advertise)
atcs_input.flags |= ATCS_ADVERTISE_CAPS;
atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
atcs_input.perf_req = perf_req;
params.length = sizeof(struct atcs_pref_req_input);
params.pointer = &atcs_input;
while (retry--) {
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
if (!info)
return -EIO;
memset(&atcs_output, 0, sizeof(atcs_output));
size = *(u16 *) info->buffer.pointer;
if (size < 3) {
DRM_INFO("ATCS buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
size = min(sizeof(atcs_output), size);
memcpy(&atcs_output, info->buffer.pointer, size);
kfree(info);
switch (atcs_output.ret_val) {
case ATCS_REQUEST_REFUSED:
default:
return -EINVAL;
case ATCS_REQUEST_COMPLETE:
return 0;
case ATCS_REQUEST_IN_PROGRESS:
udelay(10);
break;
}
}
return 0;
}
/**
* amdgpu_acpi_power_shift_control
*
* @adev: amdgpu_device pointer
* @dev_state: device acpi state
* @drv_state: driver state
*
* Executes the POWER_SHIFT_CONTROL method to
* communicate current dGPU device state and
* driver state to APU/SBIOS.
* returns 0 on success, error on failure.
*/
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
u8 dev_state, bool drv_state)
{
union acpi_object *info;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct atcs_pwr_shift_input atcs_input;
struct acpi_buffer params;
if (!amdgpu_acpi_is_power_shift_control_supported())
return -EINVAL;
atcs_input.size = sizeof(struct atcs_pwr_shift_input);
/* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
atcs_input.dgpu_id = pci_dev_id(adev->pdev);
atcs_input.dev_acpi_state = dev_state;
atcs_input.drv_state = drv_state;
params.length = sizeof(struct atcs_pwr_shift_input);
params.pointer = &atcs_input;
info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, &params);
if (!info) {
DRM_ERROR("ATCS PSC update failed\n");
return -EIO;
}
return 0;
}
/**
* amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
*
* @dev: drm_device pointer
* @ss_state: current smart shift event
*
* returns 0 on success,
* otherwise return error number.
*/
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r;
if (!amdgpu_device_supports_smart_shift(dev))
return 0;
switch (ss_state) {
/* SBIOS trigger “stop”, “enable” and “start” at D0, Driver Operational.
* SBIOS trigger “stop” at D3, Driver Not Operational.
* SBIOS trigger “stop” and “disable” at D0, Driver NOT operational.
*/
case AMDGPU_SS_DRV_LOAD:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D0,
AMDGPU_ATCS_PSC_DRV_STATE_OPR);
break;
case AMDGPU_SS_DEV_D0:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D0,
AMDGPU_ATCS_PSC_DRV_STATE_OPR);
break;
case AMDGPU_SS_DEV_D3:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT,
AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
break;
case AMDGPU_SS_DRV_UNLOAD:
r = amdgpu_acpi_power_shift_control(adev,
AMDGPU_ATCS_PSC_DEV_STATE_D0,
AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
break;
default:
return -EINVAL;
}
return r;
}
#ifdef CONFIG_ACPI_NUMA
static inline uint64_t amdgpu_acpi_get_numa_size(int nid)
{
/* This is directly using si_meminfo_node implementation as the
* function is not exported.
*/
int zone_type;
uint64_t managed_pages = 0;
pg_data_t *pgdat = NODE_DATA(nid);
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
managed_pages +=
zone_managed_pages(&pgdat->node_zones[zone_type]);
return managed_pages * PAGE_SIZE;
}
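/**
 * amdgpu_acpi_get_numa_info - look up (or create) cached NUMA info for a
 * proximity domain
 *
 * @pxm: ACPI proximity domain id
 *
 * Returns the cached amdgpu_numa_info for @pxm, allocating and populating a
 * new entry (node id and memory size) on first use. Returns NULL if the
 * allocation fails.
 */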
static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm)
{
struct amdgpu_numa_info *numa_info;
int nid;
numa_info = xa_load(&numa_info_xa, pxm);
if (!numa_info) {
struct sysinfo info;
numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL);
if (!numa_info)
return NULL;
nid = pxm_to_node(pxm);
numa_info->pxm = pxm;
numa_info->nid = nid;
if (numa_info->nid == NUMA_NO_NODE) {
si_meminfo(&info);
numa_info->size = info.totalram * info.mem_unit;
} else {
numa_info->size = amdgpu_acpi_get_numa_size(nid);
}
xa_store(&numa_info_xa, numa_info->pxm, numa_info, GFP_KERNEL);
}
return numa_info;
}
#endif
/**
* amdgpu_acpi_get_node_id - obtain the NUMA node id for corresponding amdgpu
* acpi device handle
*
* @handle: acpi handle
* @numa_info: amdgpu_numa_info structure holding numa information
*
* Queries the ACPI interface to fetch the corresponding NUMA Node ID for a
* given amdgpu acpi device.
*
* Returns ACPI STATUS OK with Node ID on success or the corresponding failure reason
*/
static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle,
struct amdgpu_numa_info **numa_info)
{
#ifdef CONFIG_ACPI_NUMA
u64 pxm;
acpi_status status;
if (!numa_info)
return_ACPI_STATUS(AE_ERROR);
status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
if (ACPI_FAILURE(status))
return status;
*numa_info = amdgpu_acpi_get_numa_info(pxm);
if (!*numa_info)
return_ACPI_STATUS(AE_ERROR);
return_ACPI_STATUS(AE_OK);
#else
return_ACPI_STATUS(AE_NOT_EXIST);
#endif
}
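/**
 * amdgpu_acpi_get_dev - find the ACPI device info entry for a PCI BDF
 *
 * @bdf: PCI bus/device/function number
 *
 * Walks the global ACPI device list and returns the matching entry, or
 * NULL if the device has not been enumerated.
 */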
static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u16 bdf)
{
struct amdgpu_acpi_dev_info *acpi_dev;
if (list_empty(&amdgpu_acpi_dev_list))
return NULL;
list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list)
if (acpi_dev->bdf == bdf)
return acpi_dev;
return NULL;
}
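/**
 * amdgpu_acpi_dev_init - create and populate an ACPI device info entry
 *
 * @dev_info: filled with the newly created entry on success
 * @xcc_info: XCC whose ACPI handle is used to evaluate the _DSM functions
 * @bdf: PCI bus/device/function number of the device
 *
 * Evaluates the supported-partition-mode, current-partition-mode and TMR
 * info _DSM functions, stores the results in a new amdgpu_acpi_dev_info
 * entry and adds it to the global ACPI device list.
 * Returns 0 on success, negative error code on failure.
 */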
static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
struct amdgpu_acpi_xcc_info *xcc_info, u16 bdf)
{
struct amdgpu_acpi_dev_info *tmp;
union acpi_object *obj;
int ret = -ENOENT;
*dev_info = NULL;
tmp = kzalloc(sizeof(struct amdgpu_acpi_dev_info), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
INIT_LIST_HEAD(&tmp->xcc_list);
INIT_LIST_HEAD(&tmp->list);
tmp->bdf = bdf;
obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
AMD_XCC_DSM_GET_SUPP_MODE, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_debug(xcc_info->handle,
"_DSM function %d evaluation failed",
AMD_XCC_DSM_GET_SUPP_MODE);
ret = -ENOENT;
goto out;
}
tmp->supp_xcp_mode = obj->integer.value & 0xFFFF;
ACPI_FREE(obj);
obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
AMD_XCC_DSM_GET_XCP_MODE, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_debug(xcc_info->handle,
"_DSM function %d evaluation failed",
AMD_XCC_DSM_GET_XCP_MODE);
ret = -ENOENT;
goto out;
}
tmp->xcp_mode = obj->integer.value & 0xFFFF;
tmp->mem_mode = (obj->integer.value >> 32) & 0xFFFF;
ACPI_FREE(obj);
/* Evaluate DSMs and fill XCC information */
obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
AMD_XCC_DSM_GET_TMR_INFO, NULL,
ACPI_TYPE_PACKAGE);
if (!obj || obj->package.count < 2) {
acpi_handle_debug(xcc_info->handle,
"_DSM function %d evaluation failed",
AMD_XCC_DSM_GET_TMR_INFO);
ret = -ENOENT;
goto out;
}
tmp->tmr_base = obj->package.elements[0].integer.value;
tmp->tmr_size = obj->package.elements[1].integer.value;
ACPI_FREE(obj);
DRM_DEBUG_DRIVER(
"New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx ",
tmp->bdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
tmp->tmr_base, tmp->tmr_size);
list_add_tail(&tmp->list, &amdgpu_acpi_dev_list);
*dev_info = tmp;
return 0;
out:
if (obj)
ACPI_FREE(obj);
kfree(tmp);
return ret;
}
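/**
 * amdgpu_acpi_get_xcc_info - fill XCC info from its ACPI _DSM functions
 *
 * @xcc_info: XCC info structure to populate
 * @bdf: returns the PCI bus/device/function number of the parent device
 *
 * Verifies the number of supported _DSM functions, extracts the physical
 * XCC id, XCP node and parent BDF from the VF/XCC mapping function, and
 * fetches the NUMA information for the XCC handle.
 * Returns 0 on success, negative error code on failure.
 */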
static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
u16 *bdf)
{
union acpi_object *obj;
acpi_status status;
int ret = -ENOENT;
obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
AMD_XCC_DSM_GET_NUM_FUNCS, NULL,
ACPI_TYPE_INTEGER);
if (!obj || obj->integer.value != AMD_XCC_DSM_NUM_FUNCS)
goto out;
ACPI_FREE(obj);
/* Evaluate DSMs and fill XCC information */
obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
AMD_XCC_DSM_GET_VF_XCC_MAPPING, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_debug(xcc_info->handle,
"_DSM function %d evaluation failed",
AMD_XCC_DSM_GET_VF_XCC_MAPPING);
ret = -EINVAL;
goto out;
}
/* PF xcc id [39:32] */
xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF;
/* xcp node of this xcc [47:40] */
xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF;
/* PF bus/dev/fn of this xcc [63:48] */
*bdf = (obj->integer.value >> 48) & 0xFFFF;
ACPI_FREE(obj);
obj = NULL;
status =
amdgpu_acpi_get_node_id(xcc_info->handle, &xcc_info->numa_info);
/* TODO: check if this check is required */
if (ACPI_SUCCESS(status))
ret = 0;
out:
if (obj)
ACPI_FREE(obj);
return ret;
}
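/**
 * amdgpu_acpi_enumerate_xcc - enumerate XCC ACPI objects
 *
 * Walks the "AMD3000"-style HIDs, gathers per-XCC information through the
 * _DSM interface and groups the XCCs under their parent device entries in
 * the global ACPI device list.
 * Returns 0 on success, -ENOMEM if an allocation fails.
 */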
static int amdgpu_acpi_enumerate_xcc(void)
{
struct amdgpu_acpi_dev_info *dev_info = NULL;
struct amdgpu_acpi_xcc_info *xcc_info;
struct acpi_device *acpi_dev;
char hid[ACPI_ID_LEN];
int ret, id;
u16 bdf;
INIT_LIST_HEAD(&amdgpu_acpi_dev_list);
xa_init(&numa_info_xa);
for (id = 0; id < AMD_XCC_MAX_HID; id++) {
sprintf(hid, "%s%d", "AMD", AMD_XCC_HID_START + id);
acpi_dev = acpi_dev_get_first_match_dev(hid, NULL, -1);
/* These ACPI objects are expected to be in sequential order. If
* one is not found, no need to check the rest.
*/
if (!acpi_dev) {
DRM_DEBUG_DRIVER("No matching acpi device found for %s",
hid);
break;
}
xcc_info = kzalloc(sizeof(struct amdgpu_acpi_xcc_info),
GFP_KERNEL);
if (!xcc_info) {
DRM_ERROR("Failed to allocate memory for xcc info\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&xcc_info->list);
xcc_info->handle = acpi_device_handle(acpi_dev);
acpi_dev_put(acpi_dev);
ret = amdgpu_acpi_get_xcc_info(xcc_info, &bdf);
if (ret) {
kfree(xcc_info);
continue;
}
dev_info = amdgpu_acpi_get_dev(bdf);
if (!dev_info)
ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, bdf);
if (ret == -ENOMEM)
return ret;
if (!dev_info) {
kfree(xcc_info);
continue;
}
list_add_tail(&xcc_info->list, &dev_info->xcc_list);
}
return 0;
}
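/**
 * amdgpu_acpi_get_tmr_info - get the TMR base and size for a device
 *
 * @adev: amdgpu_device pointer
 * @tmr_offset: returns the TMR base address reported by ACPI
 * @tmr_size: returns the TMR size reported by ACPI
 *
 * Looks up the ACPI device entry matching the device's BDF and copies out
 * the trusted memory region information.
 *
 * A minimal usage sketch (assuming the device was enumerated at detect
 * time, error handling elided):
 *
 *	u64 tmr_offset, tmr_size;
 *
 *	if (!amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size))
 *		dev_info(adev->dev, "TMR at %llx, size %llx\n",
 *			 tmr_offset, tmr_size);
 *
 * Returns 0 on success, -EINVAL for bad arguments, -ENOENT if the device
 * was not enumerated.
 */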
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
u64 *tmr_size)
{
struct amdgpu_acpi_dev_info *dev_info;
u16 bdf;
if (!tmr_offset || !tmr_size)
return -EINVAL;
bdf = pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(bdf);
if (!dev_info)
return -ENOENT;
*tmr_offset = dev_info->tmr_base;
*tmr_size = dev_info->tmr_size;
return 0;
}
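/**
 * amdgpu_acpi_get_mem_info - get the NUMA info for an XCC of a device
 *
 * @adev: amdgpu_device pointer
 * @xcc_id: physical XCC id to look up
 * @numa_info: filled with the NUMA information of the matching XCC
 *
 * Returns 0 on success, -EINVAL for bad arguments, -ENOENT if the device
 * or XCC was not found.
 */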
int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
struct amdgpu_numa_info *numa_info)
{
struct amdgpu_acpi_dev_info *dev_info;
struct amdgpu_acpi_xcc_info *xcc_info;
u16 bdf;
if (!numa_info)
return -EINVAL;
bdf = pci_dev_id(adev->pdev);
dev_info = amdgpu_acpi_get_dev(bdf);
if (!dev_info)
return -ENOENT;
list_for_each_entry(xcc_info, &dev_info->xcc_list, list) {
if (xcc_info->phy_id == xcc_id) {
memcpy(numa_info, xcc_info->numa_info,
sizeof(*numa_info));
return 0;
}
}
return -ENOENT;
}
/**
* amdgpu_acpi_event - handle notify events
*
* @nb: notifier block
* @val: val
* @data: acpi event
*
* Calls relevant amdgpu functions in response to various
* acpi events.
* Returns NOTIFY code
*/
static int amdgpu_acpi_event(struct notifier_block *nb,
unsigned long val,
void *data)
{
struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, acpi_nb);
struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
if (power_supply_is_system_supplied() > 0)
DRM_DEBUG_DRIVER("pm: AC\n");
else
DRM_DEBUG_DRIVER("pm: DC\n");
amdgpu_pm_acpi_event_handler(adev);
}
/* Check for pending SBIOS requests */
return amdgpu_atif_handler(adev, entry);
}
/* Call all ACPI methods here */
/**
* amdgpu_acpi_init - init driver acpi support
*
* @adev: amdgpu_device pointer
*
* Verifies the AMD ACPI interfaces and registers with the acpi
* notifier chain (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
if (atif->notifications.brightness_change) {
if (adev->dc_enabled) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
if (dm->backlight_dev[0])
atif->bd = dm->backlight_dev[0];
#endif
} else {
struct drm_encoder *tmp;
/* Find the encoder controlling the brightness */
list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
head) {
struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
enc->enc_priv) {
struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
if (dig->bl_dev) {
atif->bd = dig->bl_dev;
break;
}
}
}
}
}
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
return 0;
}
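/**
 * amdgpu_acpi_get_backlight_caps - copy the cached ATIF backlight caps
 *
 * @caps: backlight caps structure to fill
 *
 * Copies the backlight input signal range queried from ATIF at detect time
 * (all asics).
 */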
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
caps->caps_valid = atif->backlight_caps.caps_valid;
caps->min_input_signal = atif->backlight_caps.min_input_signal;
caps->max_input_signal = atif->backlight_caps.max_input_signal;
}
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
* @adev: amdgpu_device pointer
*
* Unregisters with the acpi notifier chain (all asics).
*/
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
}
/**
* amdgpu_atif_pci_probe_handle - look up the ATIF handle
*
* @pdev: pci device
*
* Look up the ATIF handles (all asics).
* Returns true if the handle is found, false if not.
*/
static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
acpi_handle dhandle, atif_handle;
acpi_status status;
int ret;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
if (ACPI_FAILURE(status))
return false;
amdgpu_acpi_priv.atif.handle = atif_handle;
acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
ret = amdgpu_atif_verify_interface(&amdgpu_acpi_priv.atif);
if (ret) {
amdgpu_acpi_priv.atif.handle = 0;
return false;
}
return true;
}
/**
* amdgpu_atcs_pci_probe_handle - look up the ATCS handle
*
* @pdev: pci device
*
* Look up the ATCS handles (all asics).
* Returns true if the handle is found, false if not.
*/
static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
acpi_handle dhandle, atcs_handle;
acpi_status status;
int ret;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
if (ACPI_FAILURE(status))
return false;
amdgpu_acpi_priv.atcs.handle = atcs_handle;
acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
ret = amdgpu_atcs_verify_interface(&amdgpu_acpi_priv.atcs);
if (ret) {
amdgpu_acpi_priv.atcs.handle = 0;
return false;
}
return true;
}
/**
* amdgpu_acpi_should_gpu_reset
*
* @adev: amdgpu_device_pointer
*
* returns true if should reset GPU, false if not
*/
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if ((adev->flags & AMD_IS_APU) &&
adev->gfx.imu.funcs) /* No need to do mode2 reset for IMU-enabled APUs */
return false;
if ((adev->flags & AMD_IS_APU) &&
amdgpu_acpi_is_s3_active(adev))
return false;
if (amdgpu_sriov_vf(adev))
return false;
#if IS_ENABLED(CONFIG_SUSPEND)
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
#else
return true;
#endif
}
/*
* amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
*
* Check if we have the ATIF/ATCS methods and populate
* the structures in the driver.
*/
void amdgpu_acpi_detect(void)
{
struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct pci_dev *pdev = NULL;
int ret;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
if (!atif->handle)
amdgpu_atif_pci_probe_handle(pdev);
if (!atcs->handle)
amdgpu_atcs_pci_probe_handle(pdev);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
if (!atif->handle)
amdgpu_atif_pci_probe_handle(pdev);
if (!atcs->handle)
amdgpu_atcs_pci_probe_handle(pdev);
}
if (atif->functions.sbios_requests && !atif->functions.system_params) {
/* XXX check this workaround, if the sbios request function is
* present we have to see how it's configured in the system
* params
*/
atif->functions.system_params = true;
}
if (atif->functions.system_params) {
ret = amdgpu_atif_get_notification_params(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
ret);
/* Disable notification */
atif->notification_cfg.enabled = false;
}
}
if (atif->functions.query_backlight_transfer_characteristics) {
ret = amdgpu_atif_query_backlight_caps(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
ret);
atif->backlight_caps.caps_valid = false;
}
} else {
atif->backlight_caps.caps_valid = false;
}
amdgpu_acpi_enumerate_xcc();
}
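/**
 * amdgpu_acpi_release - release driver ACPI data
 *
 * Frees the cached NUMA information and the enumerated ACPI device/XCC
 * lists built up by amdgpu_acpi_detect().
 */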
void amdgpu_acpi_release(void)
{
struct amdgpu_acpi_dev_info *dev_info, *dev_tmp;
struct amdgpu_acpi_xcc_info *xcc_info, *xcc_tmp;
struct amdgpu_numa_info *numa_info;
unsigned long index;
xa_for_each(&numa_info_xa, index, numa_info) {
kfree(numa_info);
xa_erase(&numa_info_xa, index);
}
if (list_empty(&amdgpu_acpi_dev_list))
return;
list_for_each_entry_safe(dev_info, dev_tmp, &amdgpu_acpi_dev_list,
list) {
list_for_each_entry_safe(xcc_info, xcc_tmp, &dev_info->xcc_list,
list) {
list_del(&xcc_info->list);
kfree(xcc_info);
}
list_del(&dev_info->list);
kfree(dev_info);
}
}
#if IS_ENABLED(CONFIG_SUSPEND)
/**
* amdgpu_acpi_is_s3_active
*
* @adev: amdgpu_device_pointer
*
* returns true if supported, false if not.
*/
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
{
return !(adev->flags & AMD_IS_APU) ||
(pm_suspend_target_state == PM_SUSPEND_MEM);
}
/**
* amdgpu_acpi_is_s0ix_active
*
* @adev: amdgpu_device_pointer
*
* returns true if supported, false if not.
*/
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
if (!(adev->flags & AMD_IS_APU) ||
(pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
return false;
if (adev->asic_type < CHIP_RAVEN)
return false;
/*
* If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
* risky to do any special firmware-related preparations for entering
* S0ix even though the system is suspending to idle, so return false
* in that case.
*/
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
dev_err_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
"To use suspend-to-idle change the sleep mode in BIOS setup.\n");
return false;
}
#if !IS_ENABLED(CONFIG_AMD_PMC)
dev_err_once(adev->dev,
"Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
return false;
#else
return true;
#endif /* CONFIG_AMD_PMC */
}
#endif /* CONFIG_SUSPEND */
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
/**
* amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
*
* @adev: amdgpu_device pointer
* @xcc_id: xcc accelerated compute core id
*
* Put the RLC into safe mode if it is enabled and not already in safe mode.
*/
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
if (adev->gfx.rlc.in_safe_mode[xcc_id])
return;
/* if RLC is not enabled, do nothing */
if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
return;
if (adev->cg_flags &
(AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_3D_CGCG)) {
adev->gfx.rlc.funcs->set_safe_mode(adev, xcc_id);
adev->gfx.rlc.in_safe_mode[xcc_id] = true;
}
}
/**
* amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
*
* @adev: amdgpu_device pointer
* @xcc_id: xcc accelerated compute core id
*
* Take the RLC out of safe mode if it is enabled and currently in safe mode.
*/
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
if (!(adev->gfx.rlc.in_safe_mode[xcc_id]))
return;
/* if RLC is not enabled, do nothing */
if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
return;
if (adev->cg_flags &
(AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_3D_CGCG)) {
adev->gfx.rlc.funcs->unset_safe_mode(adev, xcc_id);
adev->gfx.rlc.in_safe_mode[xcc_id] = false;
}
}
/**
* amdgpu_gfx_rlc_init_sr - Init save restore block
*
* @adev: amdgpu_device pointer
* @dws: the size of save restore block
*
* Allocate and set up the save/restore block of the rlc.
* Returns 0 on success or a negative error code if allocation failed.
*/
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
volatile u32 *dst_ptr;
u32 i;
int r;
/* allocate save restore block */
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.save_restore_obj,
&adev->gfx.rlc.save_restore_gpu_addr,
(void **)&adev->gfx.rlc.sr_ptr);
if (r) {
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
amdgpu_gfx_rlc_fini(adev);
return r;
}
/* write the sr buffer */
src_ptr = adev->gfx.rlc.reg_list;
dst_ptr = adev->gfx.rlc.sr_ptr;
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
dst_ptr[i] = cpu_to_le32(src_ptr[i]);
amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
return 0;
}
/**
* amdgpu_gfx_rlc_init_csb - Init clear state block
*
* @adev: amdgpu_device pointer
*
* Allocate and set up the clear state block of the rlc.
* Returns 0 on success or a negative error code if allocation failed.
*/
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
u32 dws;
int r;
/* allocate clear state block */
adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
amdgpu_gfx_rlc_fini(adev);
return r;
}
return 0;
}
/**
* amdgpu_gfx_rlc_init_cpt - Init cp table
*
* @adev: amdgpu_device pointer
*
* Allocate and set up the cp table of the rlc.
* Returns 0 on success or a negative error code if allocation failed.
*/
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
int r;
r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
amdgpu_gfx_rlc_fini(adev);
return r;
}
/* set up the cp table */
amdgpu_gfx_rlc_setup_cp_table(adev);
amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
return 0;
}
/**
* amdgpu_gfx_rlc_setup_cp_table - set up the cp table buffer
*
* @adev: amdgpu_device pointer
*
* Write cp firmware data into cp table.
*/
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
volatile u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;
max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
/* write the cp table buffer */
dst_ptr = adev->gfx.rlc.cp_table_ptr;
for (me = 0; me < max_me; me++) {
if (me == 0) {
const struct gfx_firmware_header_v1_0 *hdr =
(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
fw_data = (const __le32 *)
(adev->gfx.ce_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
table_offset = le32_to_cpu(hdr->jt_offset);
table_size = le32_to_cpu(hdr->jt_size);
} else if (me == 1) {
const struct gfx_firmware_header_v1_0 *hdr =
(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
fw_data = (const __le32 *)
(adev->gfx.pfp_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
table_offset = le32_to_cpu(hdr->jt_offset);
table_size = le32_to_cpu(hdr->jt_size);
} else if (me == 2) {
const struct gfx_firmware_header_v1_0 *hdr =
(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
fw_data = (const __le32 *)
(adev->gfx.me_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
table_offset = le32_to_cpu(hdr->jt_offset);
table_size = le32_to_cpu(hdr->jt_size);
} else if (me == 3) {
const struct gfx_firmware_header_v1_0 *hdr =
(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
table_offset = le32_to_cpu(hdr->jt_offset);
table_size = le32_to_cpu(hdr->jt_size);
} else if (me == 4) {
const struct gfx_firmware_header_v1_0 *hdr =
(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
fw_data = (const __le32 *)
(adev->gfx.mec2_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
table_offset = le32_to_cpu(hdr->jt_offset);
table_size = le32_to_cpu(hdr->jt_size);
}
for (i = 0; i < table_size; i++) {
dst_ptr[bo_offset + i] =
cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
}
bo_offset += table_size;
}
}
/**
* amdgpu_gfx_rlc_fini - Free BO which used for RLC
*
* @adev: amdgpu_device pointer
*
* Free the three BOs used for the rlc_save_restore_block, rlc_clear_state_block
* and rlc_jump_table_block.
*/
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
{
/* save restore block */
if (adev->gfx.rlc.save_restore_obj) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
&adev->gfx.rlc.save_restore_gpu_addr,
(void **)&adev->gfx.rlc.sr_ptr);
}
/* clear state block */
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
/* jump table block */
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
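/**
 * amdgpu_gfx_rlc_init_microcode_v2_0 - parse a v2.0 RLC microcode header
 *
 * @adev: amdgpu_device pointer
 *
 * Copies the register list format and restore arrays out of the RLC firmware
 * header and registers the RLC_G ucode with the PSP firmware list when PSP
 * front-door loading is used.
 * Returns 0 on success, -ENOMEM if the register list allocation fails.
 */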
static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
{
const struct common_firmware_header *common_hdr;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
struct amdgpu_firmware_info *info;
unsigned int *tmp;
unsigned int i;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
adev->gfx.rlc.save_and_restore_offset =
le32_to_cpu(rlc_hdr->save_and_restore_offset);
adev->gfx.rlc.clear_state_descriptor_offset =
le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
adev->gfx.rlc.avail_scratch_ram_locations =
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
adev->gfx.rlc.reg_restore_list_size =
le32_to_cpu(rlc_hdr->reg_restore_list_size);
adev->gfx.rlc.reg_list_format_start =
le32_to_cpu(rlc_hdr->reg_list_format_start);
adev->gfx.rlc.reg_list_format_separate_start =
le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
adev->gfx.rlc.starting_offsets_start =
le32_to_cpu(rlc_hdr->starting_offsets_start);
adev->gfx.rlc.reg_list_format_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
adev->gfx.rlc.reg_list_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_size_bytes);
adev->gfx.rlc.register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
if (!adev->gfx.rlc.register_list_format) {
dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
return -ENOMEM;
}
tmp = (unsigned int *)((uintptr_t)rlc_hdr +
le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
tmp = (unsigned int *)((uintptr_t)rlc_hdr +
le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
info->fw = adev->gfx.rlc_fw;
if (info->fw) {
common_hdr = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
}
}
return 0;
}
static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_1 *rlc_hdr;
struct amdgpu_firmware_info *info;
rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
adev->gfx.rlc.reg_list_format_direct_reg_list_length =
le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
}
}
}
static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_2 *rlc_hdr;
struct amdgpu_firmware_info *info;
rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
}
}
}
static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_3 *rlc_hdr;
struct amdgpu_firmware_info *info;
rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
}
}
}
static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_4 *rlc_hdr;
struct amdgpu_firmware_info *info;
rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
info->fw = adev->gfx.rlc_fw;
adev->firmware.fw_size +=
ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
}
}
}
int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
uint16_t version_major,
uint16_t version_minor)
{
int err;
if (version_major < 2) {
/* only support rlc_hdr v2.x and onwards */
dev_err(adev->dev, "unsupported rlc fw hdr\n");
return -EINVAL;
}
/* is_rlc_v2_1 is still used in APU code path */
if (version_major == 2 && version_minor == 1)
adev->gfx.rlc.is_rlc_v2_1 = true;
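/*
 * Note: the v2.0 fields below are common to every 2.x header and are always
 * parsed; the 2.1 and 2.2 extensions are cumulative for newer minors, while
 * the 2.3 and 2.4 extensions are only parsed on an exact minor match.
 */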
if (version_minor >= 0) {
err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
if (err) {
dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
return err;
}
}
if (version_minor >= 1)
amdgpu_gfx_rlc_init_microcode_v2_1(adev);
if (version_minor >= 2)
amdgpu_gfx_rlc_init_microcode_v2_2(adev);
if (version_minor == 3)
amdgpu_gfx_rlc_init_microcode_v2_3(adev);
if (version_minor == 4)
amdgpu_gfx_rlc_init_microcode_v2_4(adev);
return 0;
}
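/*
 * Illustrative caller sketch (not part of this file): a GFX IP block would
 * typically read the header version out of the loaded RLC firmware and pass
 * it in, roughly:
 *
 *	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
 *	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
 *	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
 *	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
 */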
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "dce_v6_0.h"
#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 crtc_offsets[6] =
{
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
SI_CRTC2_REGISTER_OFFSET,
SI_CRTC3_REGISTER_OFFSET,
SI_CRTC4_REGISTER_OFFSET,
SI_CRTC5_REGISTER_OFFSET
};
static const u32 hpd_offsets[] =
{
mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};
static const uint32_t dig_offsets[] = {
SI_CRTC0_REGISTER_OFFSET,
SI_CRTC1_REGISTER_OFFSET,
SI_CRTC2_REGISTER_OFFSET,
SI_CRTC3_REGISTER_OFFSET,
SI_CRTC4_REGISTER_OFFSET,
SI_CRTC5_REGISTER_OFFSET,
(0x13830 - 0x7030) >> 2,
};
static const struct {
uint32_t reg;
uint32_t vblank;
uint32_t vline;
uint32_t hpd;
} interrupt_status_offsets[6] = { {
.reg = mmDISP_INTERRUPT_STATUS,
.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
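/*
 * Illustrative sketch (not part of the table above): the interrupt handlers
 * in this file index this table by CRTC to test the per-pipe status bits,
 * roughly:
 *
 *	if (RREG32(interrupt_status_offsets[crtc].reg) &
 *	    interrupt_status_offsets[crtc].vblank)
 *		... handle the vblank for this crtc ...
 */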
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
return r;
}
static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
if (crtc >= adev->mode_info.num_crtc)
return 0;
else
return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
unsigned i;
/* Enable pflip interrupts */
for (i = 0; i < adev->mode_info.num_crtc; i++)
amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}
static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
unsigned i;
/* Disable pflip interrupts */
for (i = 0; i < adev->mode_info.num_crtc; i++)
amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}
/**
* dce_v6_0_page_flip - pageflip callback.
*
* @adev: amdgpu_device pointer
* @crtc_id: crtc to flip on
* @crtc_base: new address of the crtc (GPU MC address)
* @async: asynchronous flip
*
* Does the actual pageflip (evergreen+): programs the new pitch and scanout
* addresses for the CRTC.  The double buffered update then takes effect at
* the next vsync, or at hsync when @async is set.
*/
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
/* flip at hsync for async, default is vsync */
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
/* update pitch */
WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
fb->pitches[0] / fb->format->cpp[0]);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)crtc_base);
/* post the write */
RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
return 0;
}
/**
* dce_v6_0_hpd_sense - hpd sense callback.
*
* @adev: amdgpu_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Checks if a digital monitor is connected (evergreen+).
* Returns true if connected, false if not connected.
*/
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
bool connected = false;
if (hpd >= adev->mode_info.num_hpd)
return connected;
if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
connected = true;
return connected;
}
/**
* dce_v6_0_hpd_set_polarity - hpd set polarity callback.
*
* @adev: amdgpu_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Set the polarity of the hpd pin (evergreen+).
*/
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
enum amdgpu_hpd_id hpd)
{
u32 tmp;
bool connected = dce_v6_0_hpd_sense(adev, hpd);
if (hpd >= adev->mode_info.num_hpd)
return;
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
if (connected)
tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
else
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
/**
* dce_v6_0_hpd_init - hpd setup callback.
*
* @adev: amdgpu_device pointer
*
* Setup the hpd pins used by the card (evergreen+).
* Enable the pin, set the polarity, and enable the hpd interrupts.
*/
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
/* don't try to enable hpd on eDP or LVDS to avoid breaking the
 * aux dp channel on iMacs; this helps (but does not completely fix)
 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
 * and also avoids interrupt storms during dpms.
 */
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
continue;
}
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
drm_connector_list_iter_end(&iter);
}
/**
* dce_v6_0_hpd_fini - hpd tear down callback.
*
* @adev: amdgpu_device pointer
*
* Tear down the hpd pins used by the card (evergreen+).
* Disable the hpd interrupts.
*/
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
u32 tmp;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
continue;
tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
drm_connector_list_iter_end(&iter);
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
return mmDC_GPIO_HPD_A;
}
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
return 6;
case CHIP_OLAND:
return 2;
default:
return 0;
}
}
void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
/* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
if (amdgpu_atombios_has_dce_engine_info(adev)) {
u32 tmp;
int crtc_enabled, i;
dce_v6_0_set_vga_render_state(adev, false);
/*Disable crtc*/
for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
CRTC_CONTROL__CRTC_MASTER_EN_MASK;
if (crtc_enabled) {
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
}
}
}
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
int bpc = 0;
u32 tmp = 0;
enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
if (connector) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
bpc = amdgpu_connector_get_monitor_bpc(connector);
dither = amdgpu_connector->dither;
}
/* LVDS FMT is set up by atom */
if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
return;
if (bpc == 0)
return;
switch (bpc) {
case 6:
if (dither == AMDGPU_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
else
tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
break;
case 8:
if (dither == AMDGPU_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
else
tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
break;
case 10:
default:
/* not needed */
break;
}
WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
/**
* si_get_number_of_dram_channels - get the number of dram channels
*
* @adev: amdgpu_device pointer
*
* Look up the number of video ram channels (CIK).
* Used for display watermark bandwidth calculations
* Returns the number of dram channels
*/
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
u32 tmp = RREG32(mmMC_SHARED_CHMAP);
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0:
default:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return 8;
case 4:
return 3;
case 5:
return 6;
case 6:
return 10;
case 7:
return 12;
case 8:
return 16;
}
}
struct dce6_wm_params {
u32 dram_channels; /* number of dram channels */
u32 yclk; /* bandwidth per dram data pin in kHz */
u32 sclk; /* engine clock in kHz */
u32 disp_clk; /* display clock in kHz */
u32 src_width; /* viewport width */
u32 active_time; /* active display time in ns */
u32 blank_time; /* blank time in ns */
bool interlaced; /* mode is interlaced */
fixed20_12 vsc; /* vertical scale ratio */
u32 num_heads; /* number of active crtcs */
u32 bytes_per_pixel; /* bytes per pixel display + overlay */
u32 lb_size; /* line buffer allocated to pipe */
u32 vtaps; /* vertical scaler taps */
};
/**
* dce_v6_0_dram_bandwidth - get the dram bandwidth
*
* @wm: watermark calculation data
*
* Calculate the raw dram bandwidth (CIK).
* Used for display watermark bandwidth calculations
* Returns the dram bandwidth in MBytes/s
*/
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate raw DRAM Bandwidth */
fixed20_12 dram_efficiency; /* 0.7 */
fixed20_12 yclk, dram_channels, bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
yclk.full = dfixed_const(wm->yclk);
yclk.full = dfixed_div(yclk, a);
dram_channels.full = dfixed_const(wm->dram_channels * 4);
a.full = dfixed_const(10);
dram_efficiency.full = dfixed_const(7);
dram_efficiency.full = dfixed_div(dram_efficiency, a);
bandwidth.full = dfixed_mul(dram_channels, yclk);
bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
return dfixed_trunc(bandwidth);
}
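/*
 * Worked example (illustrative numbers only): with wm->dram_channels = 2 and
 * wm->yclk = 900000 kHz, the calculation above gives roughly
 * (2 channels * 4 bytes) * 900 MHz * 0.7 efficiency ~= 5040 MBytes/s.
 */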
/**
* dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
*
* @wm: watermark calculation data
*
* Calculate the dram bandwidth used for display (CIK).
* Used for display watermark bandwidth calculations
* Returns the dram bandwidth for display in MBytes/s
*/
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
/* Calculate DRAM Bandwidth and the part allocated to display. */
fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
fixed20_12 yclk, dram_channels, bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
yclk.full = dfixed_const(wm->yclk);
yclk.full = dfixed_div(yclk, a);
dram_channels.full = dfixed_const(wm->dram_channels * 4);
a.full = dfixed_const(10);
disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
bandwidth.full = dfixed_mul(dram_channels, yclk);
bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
return dfixed_trunc(bandwidth);
}
/**
* dce_v6_0_data_return_bandwidth - get the data return bandwidth
*
* @wm: watermark calculation data
*
* Calculate the data return bandwidth used for display (CIK).
* Used for display watermark bandwidth calculations
* Returns the data return bandwidth in MBytes/s
*/
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the display Data return Bandwidth */
fixed20_12 return_efficiency; /* 0.8 */
fixed20_12 sclk, bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
sclk.full = dfixed_const(wm->sclk);
sclk.full = dfixed_div(sclk, a);
a.full = dfixed_const(10);
return_efficiency.full = dfixed_const(8);
return_efficiency.full = dfixed_div(return_efficiency, a);
a.full = dfixed_const(32);
bandwidth.full = dfixed_mul(a, sclk);
bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
return dfixed_trunc(bandwidth);
}
/**
* dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
*
* @wm: watermark calculation data
*
* Calculate the dmif bandwidth used for display (CIK).
* Used for display watermark bandwidth calculations
* Returns the dmif bandwidth in MBytes/s
*/
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the DMIF Request Bandwidth */
fixed20_12 disp_clk_request_efficiency; /* 0.8 */
fixed20_12 disp_clk, bandwidth;
fixed20_12 a, b;
a.full = dfixed_const(1000);
disp_clk.full = dfixed_const(wm->disp_clk);
disp_clk.full = dfixed_div(disp_clk, a);
a.full = dfixed_const(32);
b.full = dfixed_mul(a, disp_clk);
a.full = dfixed_const(10);
disp_clk_request_efficiency.full = dfixed_const(8);
disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
return dfixed_trunc(bandwidth);
}
/**
* dce_v6_0_available_bandwidth - get the min available bandwidth
*
* @wm: watermark calculation data
*
* Calculate the min available bandwidth used for display (CIK).
* Used for display watermark bandwidth calculations
* Returns the min available bandwidth in MBytes/s
*/
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the available bandwidth. Display can use this temporarily but not on average. */
u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);
return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
/**
* dce_v6_0_average_bandwidth - get the average available bandwidth
*
* @wm: watermark calculation data
*
* Calculate the average available bandwidth used for display (CIK).
* Used for display watermark bandwidth calculations
* Returns the average available bandwidth in MBytes/s
*/
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the display mode Average Bandwidth
* DisplayMode should contain the source and destination dimensions,
* timing, etc.
*/
fixed20_12 bpp;
fixed20_12 line_time;
fixed20_12 src_width;
fixed20_12 bandwidth;
fixed20_12 a;
a.full = dfixed_const(1000);
line_time.full = dfixed_const(wm->active_time + wm->blank_time);
line_time.full = dfixed_div(line_time, a);
bpp.full = dfixed_const(wm->bytes_per_pixel);
src_width.full = dfixed_const(wm->src_width);
bandwidth.full = dfixed_mul(src_width, bpp);
bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
bandwidth.full = dfixed_div(bandwidth, line_time);
return dfixed_trunc(bandwidth);
}
/**
* dce_v6_0_latency_watermark - get the latency watermark
*
* @wm: watermark calculation data
*
* Calculate the latency watermark (CIK).
* Used for display watermark bandwidth calculations
* Returns the latency watermark in ns
*/
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
/* First calculate the latency in ns */
u32 mc_latency = 2000; /* 2000 ns. */
u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
(wm->num_heads * cursor_line_pair_return_time);
u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
u32 tmp, dmif_size = 12288;
fixed20_12 a, b, c;
if (wm->num_heads == 0)
return 0;
a.full = dfixed_const(2);
b.full = dfixed_const(1);
if ((wm->vsc.full > a.full) ||
((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
(wm->vtaps >= 5) ||
((wm->vsc.full >= a.full) && wm->interlaced))
max_src_lines_per_dst_line = 4;
else
max_src_lines_per_dst_line = 2;
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
tmp = min(dfixed_trunc(a), tmp);
lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
c.full = dfixed_const(lb_fill_bw);
b.full = dfixed_div(c, b);
a.full = dfixed_div(a, b);
line_fill_time = dfixed_trunc(a);
if (line_fill_time < wm->active_time)
return latency;
else
return latency + (line_fill_time - wm->active_time);
}
/**
* dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
* average and available dram bandwidth
*
* @wm: watermark calculation data
*
* Check if the display average bandwidth fits in the display
* dram bandwidth (CIK).
* Used for display watermark bandwidth calculations
* Returns true if the display fits, false if not.
*/
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
if (dce_v6_0_average_bandwidth(wm) <=
(dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
return true;
else
return false;
}
/**
* dce_v6_0_average_bandwidth_vs_available_bandwidth - check
* average and available bandwidth
*
* @wm: watermark calculation data
*
* Check if the display average bandwidth fits in the display
* available bandwidth (CIK).
* Used for display watermark bandwidth calculations
* Returns true if the display fits, false if not.
*/
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
if (dce_v6_0_average_bandwidth(wm) <=
(dce_v6_0_available_bandwidth(wm) / wm->num_heads))
return true;
else
return false;
}
/**
* dce_v6_0_check_latency_hiding - check latency hiding
*
* @wm: watermark calculation data
*
* Check latency hiding (CIK).
* Used for display watermark bandwidth calculations
* Returns true if the display fits, false if not.
*/
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
u32 lb_partitions = wm->lb_size / wm->src_width;
u32 line_time = wm->active_time + wm->blank_time;
u32 latency_tolerant_lines;
u32 latency_hiding;
fixed20_12 a;
a.full = dfixed_const(1);
if (wm->vsc.full > a.full)
latency_tolerant_lines = 1;
else {
if (lb_partitions <= (wm->vtaps + 1))
latency_tolerant_lines = 1;
else
latency_tolerant_lines = 2;
}
latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
return true;
else
return false;
}
/**
* dce_v6_0_program_watermarks - program display watermarks
*
* @adev: amdgpu_device pointer
* @amdgpu_crtc: the selected display controller
* @lb_size: line buffer size
* @num_heads: number of display controllers in use
*
* Calculate and program the display watermarks for the
* selected display controller (CIK).
*/
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce6_wm_params wm_low, wm_high;
u32 dram_channels;
u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
u32 priority_a_cnt = PRIORITY_OFF;
u32 priority_b_cnt = PRIORITY_OFF;
u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
(u32)mode->clock);
line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
(u32)mode->clock);
line_time = min(line_time, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
dram_channels = si_get_number_of_dram_channels(adev);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
wm_high.yclk =
amdgpu_dpm_get_mclk(adev, false) * 10;
wm_high.sclk =
amdgpu_dpm_get_sclk(adev, false) * 10;
} else {
wm_high.yclk = adev->pm.current_mclk * 10;
wm_high.sclk = adev->pm.current_sclk * 10;
}
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_high.interlaced = true;
wm_high.vsc = amdgpu_crtc->vsc;
wm_high.vtaps = 1;
if (amdgpu_crtc->rmx_type != RMX_OFF)
wm_high.vtaps = 2;
wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm_high.lb_size = lb_size;
wm_high.dram_channels = dram_channels;
wm_high.num_heads = num_heads;
if (adev->pm.dpm_enabled) {
/* watermark for low clocks */
wm_low.yclk =
amdgpu_dpm_get_mclk(adev, true) * 10;
wm_low.sclk =
amdgpu_dpm_get_sclk(adev, true) * 10;
} else {
wm_low.yclk = adev->pm.current_mclk * 10;
wm_low.sclk = adev->pm.current_sclk * 10;
}
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_low.interlaced = true;
wm_low.vsc = amdgpu_crtc->vsc;
wm_low.vtaps = 1;
if (amdgpu_crtc->rmx_type != RMX_OFF)
wm_low.vtaps = 2;
wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm_low.lb_size = lb_size;
wm_low.dram_channels = dram_channels;
wm_low.num_heads = num_heads;
/* set for high clocks */
latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
/* set for low clocks */
latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
!dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
!dce_v6_0_check_latency_hiding(&wm_high) ||
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
!dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
!dce_v6_0_check_latency_hiding(&wm_low) ||
(adev->mode_info.disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_a);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, amdgpu_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_a_mark = dfixed_trunc(c);
priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_b);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, amdgpu_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(1);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(2);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* restore original selection */
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);
/* write the priority marks */
WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
u32 tmp, buffer_alloc, i;
u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
* mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
* the display controllers. The partitioning is done via one of four
* preset allocations specified in bits 21:20:
* 0 - half lb
* 2 - whole lb, other crtc must be disabled
*/
/* this can get tricky if we have two large displays on a paired group
* of crtcs. Ideally for multiple large displays we'd assign them to
* non-linked crtcs for maximum line buffer allocation.
*/
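/*
 * Derived from the switch below: the half-lb split gives each CRTC of a pair
 * 4096 * 2 line buffer entries, the whole-lb setting gives 8192 * 2, and a
 * disabled controller gets 0.
 */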
if (amdgpu_crtc->base.enabled && mode) {
if (other_mode) {
tmp = 0; /* 1/2 */
buffer_alloc = 1;
} else {
tmp = 2; /* whole */
buffer_alloc = 2;
}
} else {
tmp = 0;
buffer_alloc = 0;
}
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
break;
udelay(1);
}
if (amdgpu_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
default:
return 4096 * 2;
case 2:
return 8192 * 2;
}
}
/* controller not enabled, so no lb used */
return 0;
}
/**
* dce_v6_0_bandwidth_update - program display watermarks
*
* @adev: amdgpu_device pointer
*
* Calculate and program the display watermarks and line
* buffer allocation (CIK).
*/
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
u32 num_heads = 0, lb_size;
int i;
if (!adev->mode_info.mode_config_initialized)
return;
amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
num_heads++;
}
for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
mode0 = &adev->mode_info.crtcs[i]->base.mode;
mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
}
}
static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
int i;
u32 tmp;
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
PORT_CONNECTIVITY))
adev->mode_info.audio.pin[i].connected = false;
else
adev->mode_info.audio.pin[i].connected = true;
}
}
static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
int i;
dce_v6_0_audio_get_connected_pins(adev);
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
if (adev->mode_info.audio.pin[i].connected)
return &adev->mode_info.audio.pin[i];
}
DRM_ERROR("No connected audio pins found!\n");
return NULL;
}
static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
dig->afmt->pin->id));
}
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int interlace = 0;
u32 tmp;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
interlace = 1;
if (connector->latency_present[interlace]) {
tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
VIDEO_LIPSYNC, connector->video_latency[interlace]);
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
AUDIO_LIPSYNC, connector->audio_latency[interlace]);
} else {
tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
VIDEO_LIPSYNC, 0);
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
AUDIO_LIPSYNC, 0);
}
WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u8 *sadb = NULL;
int sad_count;
u32 tmp;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
}
/* program the speaker allocation */
tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
HDMI_CONNECTION, 0);
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
DP_CONNECTION, 0);
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
DP_CONNECTION, 1);
else
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
HDMI_CONNECTION, 1);
if (sad_count)
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
SPEAKER_ALLOCATION, sadb[0]);
else
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
SPEAKER_ALLOCATION, 5); /* stereo */
WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
kfree(sadb);
}
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
static const u16 eld_reg_to_type[][2] = {
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
};
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
return;
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 tmp = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
for (j = 0; j < sad_count; j++) {
struct cea_sad *sad = &sads[j];
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
MAX_CHANNELS, sad->channels);
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
DESCRIPTOR_BYTE_2, sad->byte2);
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
SUPPORTED_FREQUENCIES, sad->freq);
max_channels = sad->channels;
}
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
stereo_freqs |= sad->freq;
else
break;
}
}
tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
}
kfree(sads);
}
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
struct amdgpu_audio_pin *pin,
bool enable)
{
if (!pin)
return;
WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
static const u32 pin_offsets[7] =
{
(0x1780 - 0x1780),
(0x1786 - 0x1780),
(0x178c - 0x1780),
(0x1792 - 0x1780),
(0x1798 - 0x1780),
(0x179d - 0x1780),
(0x17a4 - 0x1780),
};
static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
int i;
if (!amdgpu_audio)
return 0;
adev->mode_info.audio.enabled = true;
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
default:
adev->mode_info.audio.num_pins = 6;
break;
case CHIP_OLAND:
adev->mode_info.audio.num_pins = 2;
break;
}
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
adev->mode_info.audio.pin[i].channels = -1;
adev->mode_info.audio.pin[i].rate = -1;
adev->mode_info.audio.pin[i].bits_per_sample = -1;
adev->mode_info.audio.pin[i].status_bits = 0;
adev->mode_info.audio.pin[i].category_code = 0;
adev->mode_info.audio.pin[i].connected = false;
adev->mode_info.audio.pin[i].offset = pin_offsets[i];
adev->mode_info.audio.pin[i].id = i;
dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
}
return 0;
}
static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
int i;
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
for (i = 0; i < adev->mode_info.audio.num_pins; i++)
dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
adev->mode_info.audio.enabled = false;
}
static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}
static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
uint32_t clock, int bpc)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
bpc > 8 ? 0 : 1);
WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}
static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct hdmi_avi_infoframe frame;
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
uint8_t *payload = buffer + 3;
uint8_t *header = buffer;
ssize_t err;
u32 tmp;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
}
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
return;
}
WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
/* anything other than 0 */
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
HDMI_AUDIO_INFO_LINE, 2);
WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}
static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
u32 tmp;
/*
* Two dtos: generally use dto0 for hdmi, dto1 for dp.
* Express [24MHz / target pixel clock] as an exact rational
* number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
*/
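/*
 * Illustrative example (hypothetical pixel clock): for a 148500 kHz clock the
 * HDMI path below programs PHASE = 24000 and MODULE = 148500, i.e. the ratio
 * 24000 / 148500 = 24 MHz / 148.5 MHz.
 */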
tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
if (em == ATOM_ENCODER_MODE_HDMI) {
tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO_SEL, 0);
} else if (ENCODER_MODE_IS_DP(em)) {
tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO_SEL, 1);
}
WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
if (em == ATOM_ENCODER_MODE_HDMI) {
WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
} else if (ENCODER_MODE_IS_DP(em)) {
WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
}
}
static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}
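/* Assert or deassert AV mute via the HDMI_GC (General Control) register. */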
static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
}
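/* Enable or disable AVI/audio infoframe transmission and audio sample
 * packets for HDMI.
 */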
static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
if (enable) {
tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
} else {
tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}
}
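/* Enable or disable audio sample packets and the DP secondary data
 * packets (ASP/ATP/AIP) for DP audio.
 */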
static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
if (enable) {
tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
} else {
WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
}
}
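/* Full audio/AFMT programming for a mode set: pick an audio pin, program
 * the DTO, packet and AVI infoframe registers (plus ACR for HDMI), then
 * unmute and enable audio on the selected pin.
 */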
static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
int bpc = 8;
if (!dig || !dig->afmt)
return;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
if (!dig->afmt->enabled)
return;
dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
if (!dig->afmt->pin)
return;
if (encoder->crtc) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
bpc = amdgpu_crtc->bpc;
}
/* disable audio before setting up hw */
dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
dce_v6_0_audio_set_mute(encoder, true);
dce_v6_0_audio_write_speaker_allocation(encoder);
dce_v6_0_audio_write_sad_regs(encoder);
dce_v6_0_audio_write_latency_fields(encoder, mode);
if (em == ATOM_ENCODER_MODE_HDMI) {
dce_v6_0_audio_set_dto(encoder, mode->clock);
dce_v6_0_audio_set_vbi_packet(encoder);
dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
} else if (ENCODER_MODE_IS_DP(em)) {
dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
}
dce_v6_0_audio_set_packet(encoder);
dce_v6_0_audio_select_pin(encoder);
dce_v6_0_audio_set_avi_infoframe(encoder, mode);
dce_v6_0_audio_set_mute(encoder, false);
if (em == ATOM_ENCODER_MODE_HDMI) {
dce_v6_0_audio_hdmi_enable(encoder, 1);
} else if (ENCODER_MODE_IS_DP(em)) {
dce_v6_0_audio_dp_enable(encoder, 1);
}
/* enable audio after setting up hw */
dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
}
static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
if (!dig || !dig->afmt)
return;
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
if (!enable && !dig->afmt->enabled)
return;
if (!enable && dig->afmt->pin) {
dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
dig->afmt->pin = NULL;
}
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
int i, j;
for (i = 0; i < adev->mode_info.num_dig; i++)
adev->mode_info.afmt[i] = NULL;
/* DCE6 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
} else {
for (j = 0; j < i; j++) {
kfree(adev->mode_info.afmt[j]);
adev->mode_info.afmt[j] = NULL;
}
DRM_ERROR("Out of memory allocating afmt table\n");
return -ENOMEM;
}
}
return 0;
}
static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->mode_info.num_dig; i++) {
kfree(adev->mode_info.afmt[i]);
adev->mode_info.afmt[i] = NULL;
}
}
static const u32 vga_control_regs[6] =
{
mmD1VGA_CONTROL,
mmD2VGA_CONTROL,
mmD3VGA_CONTROL,
mmD4VGA_CONTROL,
mmD5VGA_CONTROL,
mmD6VGA_CONTROL,
};
static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
}
static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}
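/* Program the primary surface for scanout: pin the fb BO, translate the
 * pixel format and tiling flags into GRPH_CONTROL, and set the surface
 * address, pitch and viewport.
 */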
static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
if (atomic)
target_fb = fb;
else
target_fb = crtc->primary->fb;
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
if (!atomic) {
r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
switch (target_fb->format->format) {
case DRM_FORMAT_C8:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
GRPH_FORMAT(GRPH_FORMAT_INDEXED));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_RGB565:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
GRPH_FORMAT(GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
#ifdef __BIG_ENDIAN
fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
#endif
break;
default:
DRM_ERROR("Unsupported screen format %p4cc\n",
&target_fb->format->format);
return -EINVAL;
}
if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
unsigned bankw, bankh, mtaspect, tile_split, num_banks;
bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
fb_format |= GRPH_NUM_BANKS(num_banks);
fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
fb_format |= GRPH_TILE_SPLIT(tile_split);
fb_format |= GRPH_BANK_WIDTH(bankw);
fb_format |= GRPH_BANK_HEIGHT(bankh);
fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
fb_format |= GRPH_PIPE_CONFIG(pipe_config);
dce_v6_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
/*
	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
* for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
* retain the full precision throughout the pipeline.
*/
WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
(bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
if (bypass_lut)
DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
dce_v6_0_grph_enable(crtc, true);
WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
target_fb->height);
x &= ~3;
y &= ~1;
WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
/* set pageflip to happen anywhere in vblank interval */
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
amdgpu_bo_unpin(abo);
amdgpu_bo_unreserve(abo);
}
/* Bytes per pixel may have changed */
dce_v6_0_bandwidth_update(adev);
return 0;
}
static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
INTERLEAVE_EN);
else
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
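/* Load the 256-entry legacy gamma LUT and put the input/output CSC,
 * degamma and regamma blocks into bypass.
 */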
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
(0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
(0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
((*r++ & 0xffc0) << 14) |
((*g++ & 0xffc0) << 4) |
(*b++ >> 6));
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
(0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
ICON_DEGAMMA_MODE(0) |
(0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
(0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
(0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
(0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
}
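/* Map a UNIPHY encoder object and its link (A/B) to a DIG encoder index. */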
static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
return dig->linkb ? 1 : 0;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
return dig->linkb ? 3 : 2;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
return dig->linkb ? 5 : 4;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return 6;
default:
DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
return 0;
}
}
/**
* dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
*
* @crtc: drm crtc
*
* Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
* a single PPLL can be used for all DP crtcs/encoders. For non-DP
* monitors a dedicated PPLL must be used. If a particular board has
* an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
* as there is no need to program the PLL itself. If we are not able to
* allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
* avoid messing up an existing monitor.
*/
static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
if (adev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
return ATOM_PPLL_INVALID;
else
return ATOM_PPLL0;
} else {
/* use the same PPLL for all monitors with the same clock */
pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
/* PPLL1, and PPLL2 */
pll_in_use = amdgpu_pll_get_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
}
static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
if (lock)
cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
else
cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}
static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
CUR_CONTROL__CURSOR_EN_MASK |
(CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
(CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
int w = amdgpu_crtc->cursor_width;
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
	/* avivo cursors are offset into the total surface */
x += crtc->x;
y += crtc->y;
DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
}
if (y < 0) {
yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
y = 0;
}
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y)
{
int ret;
dce_v6_0_lock_cursor(crtc, true);
ret = dce_v6_0_cursor_move_locked(crtc, x, y);
dce_v6_0_lock_cursor(crtc, false);
return ret;
}
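/* Legacy cursor_set2 hook: pin the new cursor BO, update position,
 * hotspot and size, then unpin and release the previous cursor BO.
 */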
static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height,
int32_t hot_x,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
if (!handle) {
/* turn off cursor */
dce_v6_0_hide_cursor(crtc);
obj = NULL;
goto unpin;
}
if ((width > amdgpu_crtc->max_cursor_width) ||
(height > amdgpu_crtc->max_cursor_height)) {
DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
return -EINVAL;
}
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
return -ENOENT;
}
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
drm_gem_object_put(obj);
return ret;
}
ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
drm_gem_object_put(obj);
return ret;
}
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v6_0_lock_cursor(crtc, true);
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
dce_v6_0_cursor_move_locked(crtc, x, y);
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
unpin:
if (amdgpu_crtc->cursor_bo) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
ret = amdgpu_bo_reserve(aobj, true);
if (likely(ret == 0)) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
drm_gem_object_put(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
return 0;
}
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}
}
static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
dce_v6_0_crtc_load_lut(crtc);
return 0;
}
static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
drm_crtc_cleanup(crtc);
kfree(amdgpu_crtc);
}
static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
.cursor_set2 = dce_v6_0_crtc_cursor_set2,
.cursor_move = dce_v6_0_crtc_cursor_move,
.gamma_set = dce_v6_0_crtc_gamma_set,
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v6_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
.get_vblank_counter = amdgpu_get_vblank_counter_kms,
.enable_vblank = amdgpu_enable_vblank_kms,
.disable_vblank = amdgpu_disable_vblank_kms,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
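/* Legacy DPMS for the CRTC: enable/blank it through atombios, keep the
 * vblank and pageflip interrupt state in sync, and let dpm adjust clocks.
 */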
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
switch (mode) {
case DRM_MODE_DPMS_ON:
amdgpu_crtc->enabled = true;
amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
/* Make sure VBLANK and PFLIP interrupts are still enabled */
type = amdgpu_display_crtc_idx_to_irq_type(adev,
amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_crtc_vblank_on(crtc);
dce_v6_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_crtc_vblank_off(crtc);
if (amdgpu_crtc->enabled)
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
amdgpu_crtc->enabled = false;
break;
}
/* adjust pm to dpms */
amdgpu_dpm_compute_clocks(adev);
}
static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
{
/* disable crtc pair power gating before programming */
amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
{
dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct amdgpu_bo *abo;
abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
else {
amdgpu_bo_unpin(abo);
amdgpu_bo_unreserve(abo);
}
}
/* disable the GRPH */
dce_v6_0_grph_enable(crtc, false);
amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i] &&
adev->mode_info.crtcs[i]->enabled &&
i != amdgpu_crtc->crtc_id &&
amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* another crtc is using this pll; don't
			 * turn off the pll
			 */
goto done;
}
}
switch (amdgpu_crtc->pll_id) {
case ATOM_PPLL1:
case ATOM_PPLL2:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
default:
break;
}
done:
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->adjusted_clock = 0;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
}
static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y, struct drm_framebuffer *old_fb)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (!amdgpu_crtc->adjusted_clock)
return -EINVAL;
amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
amdgpu_atombios_crtc_scaler_setup(crtc);
dce_v6_0_cursor_reset(crtc);
	/* update the hw mode for dpm */
amdgpu_crtc->hw_mode = *adjusted_mode;
return 0;
}
static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
amdgpu_crtc->encoder = encoder;
amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
break;
}
}
if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
return false;
}
if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
/* pick pll */
amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
/* if we can't get a PPLL for a non-DP encoder, fail */
if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
!ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
return false;
return true;
}
static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}
static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.dpms = dce_v6_0_crtc_dpms,
.mode_fixup = dce_v6_0_crtc_mode_fixup,
.mode_set = dce_v6_0_crtc_mode_set,
.mode_set_base = dce_v6_0_crtc_set_base,
.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
.disable = dce_v6_0_crtc_disable,
.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (amdgpu_crtc == NULL)
return -ENOMEM;
drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
adev->mode_info.crtcs[index] = amdgpu_crtc;
amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->adjusted_clock = 0;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
return 0;
}
static int dce_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
dce_v6_0_set_display_funcs(adev);
adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_OLAND:
adev->mode_info.num_hpd = 2;
adev->mode_info.num_dig = 2;
break;
default:
return -EINVAL;
}
dce_v6_0_set_irq_funcs(adev);
return 0;
}
static int dce_v6_0_sw_init(void *handle)
{
int r, i;
bool ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
adev->mode_info.mode_config_initialized = true;
adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
adev_to_drm(adev)->mode_config.async_page_flip = true;
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
adev_to_drm(adev)->mode_config.max_width = 16384;
adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_v6_0_crtc_init(adev, i);
if (r)
return r;
}
ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
if (ret)
amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
/* setup afmt */
r = dce_v6_0_afmt_init(adev);
if (r)
return r;
r = dce_v6_0_audio_init(adev);
if (r)
return r;
/* Disable vblank IRQs aggressively for power-saving */
/* XXX: can this be enabled for DC? */
adev_to_drm(adev)->vblank_disable_immediate = true;
r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
if (r)
return r;
/* Pre-DCE11 */
INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
return r;
}
static int dce_v6_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
kfree(adev->mode_info.bios_hardcoded_edid);
drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v6_0_audio_fini(adev);
dce_v6_0_afmt_fini(adev);
drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
}
static int dce_v6_0_hw_init(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* disable vga render */
dce_v6_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
/* initialize hpd */
dce_v6_0_hpd_init(adev);
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
}
dce_v6_0_pageflip_interrupt_init(adev);
return 0;
}
static int dce_v6_0_hw_fini(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
dce_v6_0_hpd_fini(adev);
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
}
dce_v6_0_pageflip_interrupt_fini(adev);
flush_delayed_work(&adev->hotplug_work);
return 0;
}
static int dce_v6_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_display_suspend_helper(adev);
if (r)
return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
return dce_v6_0_hw_fini(handle);
}
static int dce_v6_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
adev->mode_info.bl_level);
ret = dce_v6_0_hw_init(handle);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
u8 bl_level = amdgpu_display_backlight_get_level(adev,
adev->mode_info.bl_encoder);
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
if (ret)
return ret;
return amdgpu_display_resume_helper(adev);
}
static bool dce_v6_0_is_idle(void *handle)
{
return true;
}
static int dce_v6_0_wait_for_idle(void *handle)
{
return 0;
}
static int dce_v6_0_soft_reset(void *handle)
{
DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
return 0;
}
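/* Mask or unmask the vblank interrupt in the per-CRTC INT_MASK register. */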
static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
int crtc,
enum amdgpu_interrupt_state state)
{
u32 reg_block, interrupt_mask;
if (crtc >= adev->mode_info.num_crtc) {
DRM_DEBUG("invalid crtc %d\n", crtc);
return;
}
switch (crtc) {
case 0:
reg_block = SI_CRTC0_REGISTER_OFFSET;
break;
case 1:
reg_block = SI_CRTC1_REGISTER_OFFSET;
break;
case 2:
reg_block = SI_CRTC2_REGISTER_OFFSET;
break;
case 3:
reg_block = SI_CRTC3_REGISTER_OFFSET;
break;
case 4:
reg_block = SI_CRTC4_REGISTER_OFFSET;
break;
case 5:
reg_block = SI_CRTC5_REGISTER_OFFSET;
break;
default:
DRM_DEBUG("invalid crtc %d\n", crtc);
return;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
interrupt_mask &= ~VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
interrupt_mask |= VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
break;
}
}
static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
int crtc,
enum amdgpu_interrupt_state state)
{
}
static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 dc_hpd_int_cntl;
if (type >= adev->mode_info.num_hpd) {
DRM_DEBUG("invalid hdp %d\n", type);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
dc_hpd_int_cntl |= DC_HPDx_INT_EN;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
break;
default:
break;
}
return 0;
}
static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
switch (type) {
case AMDGPU_CRTC_IRQ_VBLANK1:
dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
break;
case AMDGPU_CRTC_IRQ_VBLANK2:
dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
break;
case AMDGPU_CRTC_IRQ_VBLANK3:
dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
break;
case AMDGPU_CRTC_IRQ_VBLANK4:
dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
break;
case AMDGPU_CRTC_IRQ_VBLANK5:
dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
break;
case AMDGPU_CRTC_IRQ_VBLANK6:
dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
break;
case AMDGPU_CRTC_IRQ_VLINE1:
dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
break;
case AMDGPU_CRTC_IRQ_VLINE2:
dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
break;
case AMDGPU_CRTC_IRQ_VLINE3:
dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
break;
case AMDGPU_CRTC_IRQ_VLINE4:
dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
break;
case AMDGPU_CRTC_IRQ_VLINE5:
dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
break;
case AMDGPU_CRTC_IRQ_VLINE6:
dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
break;
default:
break;
}
return 0;
}
static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
unsigned crtc = entry->src_id - 1;
uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
crtc);
switch (entry->src_data[0]) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
DRM_DEBUG("IH: D%d vline\n", crtc + 1);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 reg;
if (type >= adev->mode_info.num_crtc) {
DRM_ERROR("invalid pageflip crtc %d\n", type);
return -EINVAL;
}
reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
if (state == AMDGPU_IRQ_STATE_DISABLE)
WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
else
WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
return 0;
}
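/* Pageflip interrupt handler: ack the flip status, send the pending
 * vblank event and schedule the BO unpin work.
 */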
static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
unsigned long flags;
unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
	crtc_id = (entry->src_id - 8) >> 1;
	if (crtc_id >= adev->mode_info.num_crtc) {
		DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
		return -EINVAL;
	}
	amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
/* IRQ could occur when in initial stage */
if (amdgpu_crtc == NULL)
return 0;
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
/* page flip completed. clean up */
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
amdgpu_crtc->pflip_works = NULL;
	/* wake up userspace */
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
return 0;
}
static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t disp_int, mask, tmp;
unsigned hpd;
if (entry->src_data[0] >= adev->mode_info.num_hpd) {
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
return 0;
}
hpd = entry->src_data[0];
disp_int = RREG32(interrupt_status_offsets[hpd].reg);
mask = interrupt_status_offsets[hpd].hpd;
if (disp_int & mask) {
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
return 0;
}
static int dce_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int dce_v6_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
.late_init = NULL,
.sw_init = dce_v6_0_sw_init,
.sw_fini = dce_v6_0_sw_fini,
.hw_init = dce_v6_0_hw_init,
.hw_fini = dce_v6_0_hw_fini,
.suspend = dce_v6_0_suspend,
.resume = dce_v6_0_resume,
.is_idle = dce_v6_0_is_idle,
.wait_for_idle = dce_v6_0_wait_for_idle,
.soft_reset = dce_v6_0_soft_reset,
.set_clockgating_state = dce_v6_0_set_clockgating_state,
.set_powergating_state = dce_v6_0_set_powergating_state,
};
static void
dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
amdgpu_encoder->pixel_clock = adjusted_mode->clock;
/* need to call this here rather than in prepare() since we need some crtc info */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
	/* the scaler setup clears this on some chips */
dce_v6_0_set_interleave(encoder->crtc, mode);
if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
dce_v6_0_afmt_enable(encoder, true);
dce_v6_0_afmt_setmode(encoder, adjusted_mode);
}
}
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
if ((amdgpu_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)) {
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
if (dig) {
dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
}
}
amdgpu_atombios_scratch_regs_lock(adev, true);
if (connector) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* select the clock/data port if it uses a router */
if (amdgpu_connector->router.cd_valid)
amdgpu_i2c_router_select_cd_port(amdgpu_connector);
/* turn eDP panel on for mode set */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_atombios_encoder_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
}
/* this is needed for the pll/ss setup to work correctly in some cases */
amdgpu_atombios_encoder_set_crtc_source(encoder);
/* set up the FMT blocks */
dce_v6_0_program_fmt(encoder);
}
static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
amdgpu_atombios_scratch_regs_lock(adev, false);
}
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig;
int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
if (amdgpu_atombios_encoder_is_digital(encoder)) {
if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
dce_v6_0_afmt_enable(encoder, false);
dig = amdgpu_encoder->enc_priv;
dig->dig_encoder = -1;
}
amdgpu_encoder->active_device = 0;
}
/* these are handled by the primary encoders */
static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
{
}
static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
{
}
static void
dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
{
}
static void
dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}
static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
.dpms = dce_v6_0_ext_dpms,
.mode_fixup = dce_v6_0_ext_mode_fixup,
.prepare = dce_v6_0_ext_prepare,
.mode_set = dce_v6_0_ext_mode_set,
.commit = dce_v6_0_ext_commit,
.disable = dce_v6_0_ext_disable,
/* no detect for TMDS/LVDS yet */
};
static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
.dpms = amdgpu_atombios_encoder_dpms,
.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
.prepare = dce_v6_0_encoder_prepare,
.mode_set = dce_v6_0_encoder_mode_set,
.commit = dce_v6_0_encoder_commit,
.disable = dce_v6_0_encoder_disable,
.detect = amdgpu_atombios_encoder_dig_detect,
};
static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
.dpms = amdgpu_atombios_encoder_dpms,
.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
.prepare = dce_v6_0_encoder_prepare,
.mode_set = dce_v6_0_encoder_mode_set,
.commit = dce_v6_0_encoder_commit,
.detect = amdgpu_atombios_encoder_dac_detect,
};
static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
kfree(amdgpu_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(amdgpu_encoder);
}
static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
.destroy = dce_v6_0_encoder_destroy,
};
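/* Register a drm_encoder for an ATOM encoder object, or just extend the
 * supported-device mask if the encoder was already added.
 */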
static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
uint32_t encoder_enum,
uint32_t supported_device,
u16 caps)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->encoder_enum == encoder_enum) {
amdgpu_encoder->devices |= supported_device;
return;
}
}
/* add a new one */
amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
if (!amdgpu_encoder)
return;
encoder = &amdgpu_encoder->base;
switch (adev->mode_info.num_crtc) {
case 1:
encoder->possible_crtcs = 0x1;
break;
case 2:
default:
encoder->possible_crtcs = 0x3;
break;
case 4:
encoder->possible_crtcs = 0xf;
break;
case 6:
encoder->possible_crtcs = 0x3f;
break;
}
amdgpu_encoder->enc_priv = NULL;
amdgpu_encoder->encoder_enum = encoder_enum;
amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
amdgpu_encoder->devices = supported_device;
amdgpu_encoder->rmx_type = RMX_OFF;
amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
amdgpu_encoder->is_ext_encoder = false;
amdgpu_encoder->caps = caps;
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
} else {
drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
}
drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_SI170B:
case ENCODER_OBJECT_ID_CH7303:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
amdgpu_encoder->is_ext_encoder = true;
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
else
drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
break;
}
}
static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.bandwidth_update = &dce_v6_0_bandwidth_update,
.vblank_get_counter = &dce_v6_0_vblank_get_counter,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v6_0_hpd_sense,
.hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
.hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
.page_flip = &dce_v6_0_page_flip,
.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
.add_encoder = &dce_v6_0_encoder_add,
.add_connector = &amdgpu_connector_add,
};
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
adev->mode_info.funcs = &dce_v6_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
.set = dce_v6_0_set_crtc_interrupt_state,
.process = dce_v6_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
.set = dce_v6_0_set_pageflip_interrupt_state,
.process = dce_v6_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
.set = dce_v6_0_set_hpd_interrupt_state,
.process = dce_v6_0_hpd_irq,
};
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
if (adev->mode_info.num_crtc > 0)
adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
else
adev->crtc_irq.num_types = 0;
adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
const struct amdgpu_ip_block_version dce_v6_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &dce_v6_0_ip_funcs,
};
const struct amdgpu_ip_block_version dce_v6_4_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 6,
.minor = 4,
.rev = 0,
.funcs = &dce_v6_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/dce_v6_0.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author: Huang Rui
*
*/
#include <linux/firmware.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v11_0_8.h"
#include "psp_v12_0.h"
#include "psp_v13_0.h"
#include "psp_v13_0_4.h"
#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_atomfirmware.h"
#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
static int psp_load_smu_fw(struct psp_context *psp);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);
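/* Set up the PSP KM ring descriptor and allocate its ring buffer. */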
static int psp_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
ring = &psp->km_ring;
ring->ring_type = ring_type;
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
if (ret) {
ring->ring_size = 0;
return ret;
}
return 0;
}
/*
* Due to DF Cstate management being centralized in PMFW, the firmware
* loading sequence is updated as below:
* - Load KDB
* - Load SYS_DRV
* - Load tOS
* - Load PMFW
* - Setup TMR
* - Load other non-psp fw
* - Load ASD
* - Load XGMI/RAS/HDCP/DTM TA if any
*
* This new sequence is required for
* - Arcturus and onwards
*/
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
psp->pmfw_centralized_cstate_management = false;
return;
}
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 7):
psp->pmfw_centralized_cstate_management = true;
break;
default:
psp->pmfw_centralized_cstate_management = false;
break;
}
}
static int psp_init_sriov_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
char ucode_prefix[30];
int ret = 0;
amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 9):
adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, ucode_prefix);
break;
case IP_VERSION(13, 0, 2):
adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
ret = psp_init_cap_microcode(psp, ucode_prefix);
ret &= psp_init_ta_microcode(psp, ucode_prefix);
break;
case IP_VERSION(13, 0, 0):
adev->virt.autoload_ucode_id = 0;
break;
case IP_VERSION(13, 0, 6):
ret = psp_init_cap_microcode(psp, ucode_prefix);
ret &= psp_init_ta_microcode(psp, ucode_prefix);
break;
case IP_VERSION(13, 0, 10):
adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
ret = psp_init_cap_microcode(psp, ucode_prefix);
break;
default:
return -EINVAL;
}
return ret;
}
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(9, 0, 0):
psp_v3_1_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
psp_v10_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 7):
adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
fallthrough;
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
case IP_VERSION(11, 0, 3):
case IP_VERSION(12, 0, 1):
psp_v12_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
psp_v13_0_set_psp_funcs(psp);
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
case IP_VERSION(11, 0, 8):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
psp_v11_0_8_set_psp_funcs(psp);
psp->autoload_supported = false;
}
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
break;
case IP_VERSION(13, 0, 4):
psp_v13_0_4_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
default:
return -EINVAL;
}
psp->adev = adev;
psp_check_pmfw_centralized_cstate_management(psp);
if (amdgpu_sriov_vf(adev))
return psp_init_sriov_microcode(psp);
else
return psp_init_microcode(psp);
}
void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
mem_ctx->shared_bo = NULL;
}
static void psp_free_shared_bufs(struct psp_context *psp)
{
void *tmr_buf;
void **pptr;
/* free TMR memory buffer */
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
psp->tmr_bo = NULL;
/* free xgmi shared memory */
psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
/* free ras shared memory */
psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
/* free hdcp shared memory */
psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
/* free dtm shared memory */
psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
/* free rap shared memory */
psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
/* free securedisplay shared memory */
psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
}
static void psp_memory_training_fini(struct psp_context *psp)
{
struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
kfree(ctx->sys_cache);
ctx->sys_cache = NULL;
}
static int psp_memory_training_init(struct psp_context *psp)
{
int ret;
struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
DRM_DEBUG("memory training is not supported!\n");
return 0;
}
ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
if (ctx->sys_cache == NULL) {
DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
ret = -ENOMEM;
goto Err_out;
}
DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
ctx->train_data_size,
ctx->p2c_train_data_offset,
ctx->c2p_train_data_offset);
ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
return 0;
Err_out:
psp_memory_training_fini(psp);
return ret;
}
/*
* Helper function to query a psp runtime database entry
*
* @adev: amdgpu_device pointer
* @entry_type: the type of psp runtime database entry
* @db_entry: runtime database entry pointer
*
* Return false if the runtime database doesn't exist or the entry is invalid,
* or true if the specific database entry is found and copied to @db_entry
*/
static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
enum psp_runtime_entry_type entry_type,
void *db_entry)
{
uint64_t db_header_pos, db_dir_pos;
struct psp_runtime_data_header db_header = {0};
struct psp_runtime_data_directory db_dir = {0};
bool ret = false;
int i;
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6))
return false;
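/* the runtime database header lives at a fixed offset below the top of VRAM,
* immediately followed by the directory
*/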
db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
/* read runtime db header from vram */
amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
sizeof(struct psp_runtime_data_header), false);
if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
/* runtime db doesn't exist, exit */
dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
return false;
}
/* read the runtime database directory from vram */
amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
sizeof(struct psp_runtime_data_directory), false);
if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
/* invalid db entry count, exit */
dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
return false;
}
/* look up for requested entry type */
for (i = 0; i < db_dir.entry_count && !ret; i++) {
if (db_dir.entry_list[i].entry_type == entry_type) {
switch (entry_type) {
case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
/* invalid db entry size */
dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
return false;
}
/* read runtime database entry */
amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
(uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
ret = true;
break;
case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
/* invalid db entry size */
dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
return false;
}
/* read runtime database entry */
amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
(uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
ret = true;
break;
default:
ret = false;
break;
}
}
}
return ret;
}
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
int ret;
struct psp_runtime_boot_cfg_entry boot_cfg_entry;
struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
struct psp_runtime_scpm_entry scpm_entry;
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
DRM_ERROR("Failed to allocate memory to command buffer!\n");
return -ENOMEM;
}
adev->psp.xgmi_context.supports_extended_data =
!adev->gmc.xgmi.connected_to_cpu &&
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
memset(&scpm_entry, 0, sizeof(scpm_entry));
if ((psp_get_runtime_db_entry(adev,
PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
&scpm_entry)) &&
(scpm_entry.scpm_status != SCPM_DISABLE)) {
adev->scpm_enabled = true;
adev->scpm_status = scpm_entry.scpm_status;
} else {
adev->scpm_enabled = false;
adev->scpm_status = SCPM_DISABLE;
}
/* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
if (psp_get_runtime_db_entry(adev,
PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
&boot_cfg_entry)) {
psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
if ((psp->boot_cfg_bitmask) &
BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
/* If psp runtime database exists, then
* only enable two stage memory training
* when TWO_STAGE_DRAM_TRAINING bit is set
* in runtime database
*/
mem_training_ctx->enable_mem_training = true;
}
} else {
/* If psp runtime database doesn't exist or is
* invalid, force enable two stage memory training
*/
mem_training_ctx->enable_mem_training = true;
}
if (mem_training_ctx->enable_mem_training) {
ret = psp_memory_training_init(psp);
if (ret) {
DRM_ERROR("Failed to initialize memory training!\n");
return ret;
}
ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
if (ret) {
DRM_ERROR("Failed to process memory training!\n");
return ret;
}
}
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
amdgpu_sriov_vf(adev) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&psp->fw_pri_bo,
&psp->fw_pri_mc_addr,
&psp->fw_pri_buf);
if (ret)
return ret;
ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&psp->fence_buf_bo,
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
goto failed1;
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
if (ret)
goto failed2;
return 0;
failed2:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
failed1:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
return ret;
}
static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd = psp->cmd;
psp_memory_training_fini(psp);
amdgpu_ucode_release(&psp->sos_fw);
amdgpu_ucode_release(&psp->asd_fw);
amdgpu_ucode_release(&psp->ta_fw);
amdgpu_ucode_release(&psp->cap_fw);
amdgpu_ucode_release(&psp->toc_fw);
kfree(cmd);
cmd = NULL;
psp_free_shared_bufs(psp);
if (psp->km_ring.ring_mem)
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&psp->km_ring.ring_mem_mc_addr,
(void **)&psp->km_ring.ring_mem);
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
return 0;
}
int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
uint32_t reg_val, uint32_t mask, bool check_changed)
{
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
if (psp->adev->no_hw_access)
return 0;
for (i = 0; i < adev->usec_timeout; i++) {
val = RREG32(reg_index);
if (check_changed) {
if (val != reg_val)
return 0;
} else {
if ((val & mask) == reg_val)
return 0;
}
udelay(1);
}
return -ETIME;
}
int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
{
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
if (psp->adev->no_hw_access)
return 0;
for (i = 0; i < msec_timeout; i++) {
val = RREG32(reg_index);
if ((val & mask) == reg_val)
return 0;
msleep(1);
}
return -ETIME;
}
static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
{
switch (cmd_id) {
case GFX_CMD_ID_LOAD_TA:
return "LOAD_TA";
case GFX_CMD_ID_UNLOAD_TA:
return "UNLOAD_TA";
case GFX_CMD_ID_INVOKE_CMD:
return "INVOKE_CMD";
case GFX_CMD_ID_LOAD_ASD:
return "LOAD_ASD";
case GFX_CMD_ID_SETUP_TMR:
return "SETUP_TMR";
case GFX_CMD_ID_LOAD_IP_FW:
return "LOAD_IP_FW";
case GFX_CMD_ID_DESTROY_TMR:
return "DESTROY_TMR";
case GFX_CMD_ID_SAVE_RESTORE:
return "SAVE_RESTORE_IP_FW";
case GFX_CMD_ID_SETUP_VMR:
return "SETUP_VMR";
case GFX_CMD_ID_DESTROY_VMR:
return "DESTROY_VMR";
case GFX_CMD_ID_PROG_REG:
return "PROG_REG";
case GFX_CMD_ID_GET_FW_ATTESTATION:
return "GET_FW_ATTESTATION";
case GFX_CMD_ID_LOAD_TOC:
return "ID_LOAD_TOC";
case GFX_CMD_ID_AUTOLOAD_RLC:
return "AUTOLOAD_RLC";
case GFX_CMD_ID_BOOT_CFG:
return "BOOT_CFG";
default:
return "UNKNOWN CMD";
}
}
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
int index;
int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
if (psp->adev->no_hw_access)
return 0;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
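/* tag this submission with a unique fence value; PSP writes it back to the
* fence buffer on completion, which the polling loop below waits for
*/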
index = atomic_inc_return(&psp->fence_value);
ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
if (ret) {
atomic_dec(&psp->fence_value);
goto exit;
}
amdgpu_device_invalidate_hdp(psp->adev, NULL);
while (*((unsigned int *)psp->fence_buf) != index) {
if (--timeout == 0)
break;
/*
* Don't wait for the timeout when err_event_athub occurs, because
* the gpu reset thread has been triggered and the lock needs to be
* released for the psp resume sequence.
*/
ras_intr = amdgpu_ras_intr_triggered();
if (ras_intr)
break;
usleep_range(10, 100);
amdgpu_device_invalidate_hdp(psp->adev, NULL);
}
/* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
/* In some cases the psp response status is not 0 even though there is no
* problem while the command is submitted; some versions of the PSP FW
* don't write 0 to that field.
* So only print a warning instead of an error during psp initialization,
* to avoid breaking hw_init, and don't return -EINVAL.
*/
if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode %s(0x%X) ",
amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
/* If any firmware (including CAP) load fails under SRIOV, it should
* return failure to stop the VF from initializing.
* Also return failure in case of timeout
*/
if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
ret = -EINVAL;
goto exit;
}
}
if (ucode) {
ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
}
exit:
return ret;
}
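/* psp->cmd is a single scratch command buffer shared by all submissions;
* it is protected by psp->mutex and cleared on every acquire
*/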
static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
{
struct psp_gfx_cmd_resp *cmd = psp->cmd;
mutex_lock(&psp->mutex);
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
return cmd;
}
static void release_psp_cmd_buf(struct psp_context *psp)
{
mutex_unlock(&psp->mutex);
}
static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
struct amdgpu_device *adev = psp->adev;
uint32_t size = 0;
uint64_t tmr_pa = 0;
if (tmr_bo) {
size = amdgpu_bo_size(tmr_bo);
tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
}
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
else
cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}
static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t pri_buf_mc, uint32_t size)
{
cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
cmd->cmd.cmd_load_toc.toc_size = size;
}
/* Issue the LOAD TOC cmd to PSP to parse the TOC and calculate the TMR size needed */
static int psp_load_toc(struct psp_context *psp,
uint32_t *tmr_size)
{
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
/* Copy toc to psp firmware private buffer */
psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret)
*tmr_size = psp->cmd_buf_mem->resp.tmr_size;
release_psp_cmd_buf(psp);
return ret;
}
static bool psp_boottime_tmr(struct psp_context *psp)
{
switch (psp->adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 6):
return true;
default:
return false;
}
}
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
int ret = 0;
int tmr_size;
void *tmr_buf;
void **pptr;
/*
* According to the HW engineers, the TMR address should be "naturally
* aligned", i.e. the start address should be an integer multiple of the TMR size.
*
* Note: this memory needs to stay reserved until the driver
* is unloaded.
*/
tmr_size = PSP_TMR_SIZE(psp->adev);
/* For ASICs that support RLC autoload, psp will parse the TOC
* and calculate the total TMR size needed
*/
if (!amdgpu_sriov_vf(psp->adev) &&
psp->toc.start_addr &&
psp->toc.size_bytes &&
psp->fw_pri_buf) {
ret = psp_load_toc(psp, &tmr_size);
if (ret) {
DRM_ERROR("Failed to load toc\n");
return ret;
}
}
if (!psp->tmr_bo) {
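/* a CPU mapping of the TMR is only requested under SRIOV; the same
* condition is used when freeing it in psp_free_shared_bufs()
*/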
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
AMDGPU_HAS_VRAM(psp->adev) ?
AMDGPU_GEM_DOMAIN_VRAM :
AMDGPU_GEM_DOMAIN_GTT,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
return ret;
}
static bool psp_skip_tmr(struct psp_context *psp)
{
switch (psp->adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
default:
return false;
}
}
static int psp_tmr_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
/* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
* Already set up by host driver.
*/
if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
return 0;
cmd = acquire_psp_cmd_buf(psp);
psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
if (psp->tmr_bo)
DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
}
static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd)
{
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
else
cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
}
static int psp_tmr_unload(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
/* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
* as TMR is not loaded at all
*/
if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
return 0;
cmd = acquire_psp_cmd_buf(psp);
psp_prep_tmr_unload_cmd_buf(psp, cmd);
dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
}
static int psp_tmr_terminate(struct psp_context *psp)
{
return psp_tmr_unload(psp);
}
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
if (!output_ptr)
return -EINVAL;
if (amdgpu_sriov_vf(psp->adev))
return 0;
cmd = acquire_psp_cmd_buf(psp);
cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
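/* assemble the 64-bit FW attestation record address from the
* low/high halves returned by PSP
*/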
*output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
}
release_psp_cmd_buf(psp);
return ret;
}
static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
{
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd;
int ret;
if (amdgpu_sriov_vf(adev))
return 0;
cmd = acquire_psp_cmd_buf(psp);
cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (!ret) {
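/* report only the GECC bit of the returned boot config to the caller */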
*boot_cfg =
(cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
}
release_psp_cmd_buf(psp);
return ret;
}
static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
{
int ret;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd;
if (amdgpu_sriov_vf(adev))
return 0;
cmd = acquire_psp_cmd_buf(psp);
cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
cmd->cmd.boot_cfg.boot_config = boot_cfg;
cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
}
static int psp_rl_load(struct amdgpu_device *adev)
{
int ret;
struct psp_context *psp = &adev->psp;
struct psp_gfx_cmd_resp *cmd;
if (!is_psp_fw_valid(psp->rl))
return 0;
cmd = acquire_psp_cmd_buf(psp);
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
}
int psp_spatial_partition(struct psp_context *psp, int mode)
{
struct psp_gfx_cmd_resp *cmd;
int ret;
if (amdgpu_sriov_vf(psp->adev))
return 0;
cmd = acquire_psp_cmd_buf(psp);
cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
cmd->cmd.cmd_spatial_part.mode = mode;
dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
}
static int psp_asd_initialize(struct psp_context *psp)
{
int ret;
/* If the PSP version doesn't match the ASD version, ASD loading will fail;
* add a workaround to bypass it for sriov for now.
* TODO: add version check to make it common
*/
if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
return 0;
psp->asd_context.mem_context.shared_mc_addr = 0;
psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
ret = psp_ta_load(psp, &psp->asd_context);
if (!ret)
psp->asd_context.initialized = true;
return ret;
}
static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint32_t session_id)
{
cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
cmd->cmd.cmd_unload_ta.session_id = session_id;
}
int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
context->resp_status = cmd->resp.status;
release_psp_cmd_buf(psp);
return ret;
}
static int psp_asd_terminate(struct psp_context *psp)
{
int ret;
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->asd_context.initialized)
return 0;
ret = psp_ta_unload(psp, &psp->asd_context);
if (!ret)
psp->asd_context.initialized = false;
return ret;
}
static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint32_t id, uint32_t value)
{
cmd->cmd_id = GFX_CMD_ID_PROG_REG;
cmd->cmd.cmd_setup_reg_prog.reg_value = value;
cmd->cmd.cmd_setup_reg_prog.reg_id = id;
}
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
uint32_t value)
{
struct psp_gfx_cmd_resp *cmd;
int ret = 0;
if (reg >= PSP_REG_LAST)
return -EINVAL;
cmd = acquire_psp_cmd_buf(psp);
psp_prep_reg_prog_cmd_buf(cmd, reg, value);
ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
if (ret)
DRM_ERROR("PSP failed to program reg id %d", reg);
release_psp_cmd_buf(psp);
return ret;
}
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t ta_bin_mc,
struct ta_context *context)
{
cmd->cmd_id = context->ta_load_type;
cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
lower_32_bits(context->mem_context.shared_mc_addr);
cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
upper_32_bits(context->mem_context.shared_mc_addr);
cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}
int psp_ta_init_shared_buf(struct psp_context *psp,
struct ta_mem_context *mem_ctx)
{
/*
* Allocate 16k of memory, aligned to 4k, from the frame buffer (local
* physical memory) for the ta to host shared buffer
*/
return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&mem_ctx->shared_bo,
&mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
}
static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint32_t ta_cmd_id,
uint32_t session_id)
{
cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
cmd->cmd.cmd_invoke_cmd.session_id = session_id;
cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}
int psp_ta_invoke(struct psp_context *psp,
uint32_t ta_cmd_id,
struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
context->resp_status = cmd->resp.status;
release_psp_cmd_buf(psp);
return ret;
}
int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
cmd = acquire_psp_cmd_buf(psp);
psp_copy_fw(psp, context->bin_desc.start_addr,
context->bin_desc.size_bytes);
psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
context->resp_status = cmd->resp.status;
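/* the session id returned by a successful load is needed for later
* INVOKE and UNLOAD commands against this TA
*/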
if (!ret)
context->session_id = cmd->resp.session_id;
release_psp_cmd_buf(psp);
return ret;
}
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}
int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
struct amdgpu_device *adev = psp->adev;
/* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
(adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
adev->gmc.xgmi.connected_to_cpu))
return 0;
if (!psp->xgmi_context.context.initialized)
return 0;
ret = psp_ta_unload(psp, &psp->xgmi_context.context);
psp->xgmi_context.context.initialized = false;
return ret;
}
int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
if (!psp->ta_fw ||
!psp->xgmi_context.context.bin_desc.size_bytes ||
!psp->xgmi_context.context.bin_desc.start_addr)
return -ENOENT;
if (!load_ta)
goto invoke;
psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->xgmi_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
if (ret)
return ret;
}
/* Load XGMI TA */
ret = psp_ta_load(psp, &psp->xgmi_context.context);
if (!ret)
psp->xgmi_context.context.initialized = true;
else
return ret;
invoke:
/* Initialize XGMI session */
xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->flag_extend_link_record = set_extended_data;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
return ret;
}
int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
/* Invoke xgmi ta to get hive id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
*hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
return 0;
}
int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
/* Invoke xgmi ta to get the node id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
*node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
return 0;
}
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6);
}
/*
* Chips that support extended topology information require the driver to
* reflect topology information in the opposite direction. This is
* because the TA has already exceeded its link record limit and if the
* TA holds bi-directional information, the driver would have to do
* multiple fetches instead of just two.
*/
static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
struct psp_xgmi_node_info node_info)
{
struct amdgpu_device *mirror_adev;
struct amdgpu_hive_info *hive;
uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
uint64_t dst_node_id = node_info.node_id;
uint8_t dst_num_hops = node_info.num_hops;
uint8_t dst_num_links = node_info.num_links;
hive = amdgpu_get_xgmi_hive(psp->adev);
list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
struct psp_xgmi_topology_info *mirror_top_info;
int j;
if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
continue;
mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
for (j = 0; j < mirror_top_info->num_nodes; j++) {
if (mirror_top_info->nodes[j].node_id != src_node_id)
continue;
mirror_top_info->nodes[j].num_hops = dst_num_hops;
/*
* prevent re-reflecting a 0 num_links value, since the reflection
* criteria is based on num_hops (direct or indirect).
*/
if (dst_num_links)
mirror_top_info->nodes[j].num_links = dst_num_links;
break;
}
break;
}
amdgpu_put_xgmi_hive(hive);
}
int psp_xgmi_get_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology,
bool get_extended_data)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
int i;
int ret;
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
xgmi_cmd->flag_extend_link_record = get_extended_data;
/* Fill in the shared memory with topology information as input */
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
topology_info_input->num_nodes = number_devices;
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
/* Invoke xgmi ta to get the topology information */
ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
if (ret)
return ret;
/* Read the output topology information from the shared memory */
topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
for (i = 0; i < topology->num_nodes; i++) {
/* extended data will either be 0 or equal to non-extended data */
if (topology_info_output->nodes[i].num_hops)
topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
/* non-extended data gets everything here so no need to update */
if (!get_extended_data) {
topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
topology->nodes[i].is_sharing_enabled =
topology_info_output->nodes[i].is_sharing_enabled;
topology->nodes[i].sdma_engine =
topology_info_output->nodes[i].sdma_engine;
}
}
/* Invoke xgmi ta again to get the link information */
if (psp_xgmi_peer_link_info_supported(psp)) {
struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output;
bool requires_reflection =
(psp->xgmi_context.supports_extended_data && get_extended_data) ||
psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6);
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_PEER_LINKS);
if (ret)
return ret;
link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
for (i = 0; i < topology->num_nodes; i++) {
/* accumulate num_links on extended data */
topology->nodes[i].num_links = get_extended_data ?
topology->nodes[i].num_links +
link_info_output->nodes[i].num_links :
((requires_reflection && topology->nodes[i].num_links) ? topology->nodes[i].num_links :
link_info_output->nodes[i].num_links);
/* reflect the topology information for bi-directionality */
if (requires_reflection && topology->nodes[i].num_hops)
psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
}
}
return 0;
}
int psp_xgmi_set_topology_info(struct psp_context *psp,
int number_devices,
struct psp_xgmi_topology_info *topology)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
int i;
if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
return -EINVAL;
xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
topology_info_input->num_nodes = number_devices;
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
topology_info_input->nodes[i].is_sharing_enabled = 1;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
/* Invoke xgmi ta to set topology information */
return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
// ras begin
static void psp_ras_ta_check_status(struct psp_context *psp)
{
struct ta_ras_shared_memory *ras_cmd =
(struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
switch (ras_cmd->ras_status) {
case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
dev_warn(psp->adev->dev,
"RAS WARNING: cmd failed due to unsupported ip\n");
break;
case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
dev_warn(psp->adev->dev,
"RAS WARNING: cmd failed due to unsupported error injection\n");
break;
case TA_RAS_STATUS__SUCCESS:
break;
case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
dev_warn(psp->adev->dev,
"RAS WARNING: Inject error to critical region is not allowed\n");
break;
default:
dev_warn(psp->adev->dev,
"RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
break;
}
}
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
struct ta_ras_shared_memory *ras_cmd;
int ret;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
if (amdgpu_ras_intr_triggered())
return ret;
if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
DRM_WARN("RAS: Unsupported Interface");
return -EINVAL;
}
if (!ret) {
if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
dev_warn(psp->adev->dev, "ECC switch disabled\n");
ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
dev_warn(psp->adev->dev,
"RAS internal register access blocked\n");
psp_ras_ta_check_status(psp);
}
return ret;
}
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable)
{
struct ta_ras_shared_memory *ras_cmd;
int ret;
if (!psp->ras_context.context.initialized)
return -EINVAL;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
if (enable)
ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
else
ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
ras_cmd->ras_in_message = *info;
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
if (ret)
return -EINVAL;
return 0;
}
int psp_ras_terminate(struct psp_context *psp)
{
int ret;
/*
* TODO: bypass the terminate in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->ras_context.context.initialized)
return 0;
ret = psp_ta_unload(psp, &psp->ras_context.context);
psp->ras_context.context.initialized = false;
return ret;
}
int psp_ras_initialize(struct psp_context *psp)
{
int ret;
uint32_t boot_cfg = 0xFF;
struct amdgpu_device *adev = psp->adev;
struct ta_ras_shared_memory *ras_cmd;
/*
* TODO: bypass the initialize in sriov for now
*/
if (amdgpu_sriov_vf(adev))
return 0;
if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
!adev->psp.ras_context.context.bin_desc.start_addr) {
dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
/* query GECC enablement status from boot config
* boot_cfg: 1: GECC is enabled or 0: GECC is disabled
*/
ret = psp_boot_config_get(adev, &boot_cfg);
if (ret)
dev_warn(adev->dev, "PSP get boot config failed\n");
if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
if (!boot_cfg) {
dev_info(adev->dev, "GECC is disabled\n");
} else {
/* disable GECC in the next boot cycle if ras is
* disabled by the module parameters amdgpu_ras_enable
* and/or amdgpu_ras_mask, or the boot_config_get call
* failed
*/
ret = psp_boot_config_set(adev, 0);
if (ret)
dev_warn(adev->dev, "PSP set boot config failed\n");
else
dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
}
} else {
if (boot_cfg == 1) {
dev_info(adev->dev, "GECC is enabled\n");
} else {
/* enable GECC in the next boot cycle if it is disabled
* in the boot config, or force-enable GECC if getting the
* boot configuration failed
*/
ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
if (ret)
dev_warn(adev->dev, "PSP set boot config failed\n");
else
dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
}
}
}
psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->ras_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
if (ret)
return ret;
}
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
if (amdgpu_ras_is_poison_mode_supported(adev))
ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
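/* pass the compute XCC mask to the RAS TA; channel_dis_num is presumably
* the number of disabled memory channels derived from the half-use mask
*/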
ras_cmd->ras_in_message.init_flags.xcc_mask =
adev->gfx.xcc_mask;
ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
ret = psp_ta_load(psp, &psp->ras_context.context);
if (!ret && !ras_cmd->ras_status)
psp->ras_context.context.initialized = true;
else {
if (ras_cmd->ras_status)
dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
/* fail to load RAS TA */
psp->ras_context.context.initialized = false;
}
return ret;
}
int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
{
struct ta_ras_shared_memory *ras_cmd;
struct amdgpu_device *adev = psp->adev;
int ret;
uint32_t dev_mask;
if (!psp->ras_context.context.initialized)
return -EINVAL;
switch (info->block_id) {
case TA_RAS_BLOCK__GFX:
dev_mask = GET_MASK(GC, instance_mask);
break;
case TA_RAS_BLOCK__SDMA:
dev_mask = GET_MASK(SDMA0, instance_mask);
break;
case TA_RAS_BLOCK__VCN:
case TA_RAS_BLOCK__JPEG:
dev_mask = GET_MASK(VCN, instance_mask);
break;
default:
dev_mask = instance_mask;
break;
}
/* reuse sub_block_index for backward compatibility */
dev_mask <<= AMDGPU_RAS_INST_SHIFT;
dev_mask &= AMDGPU_RAS_INST_MASK;
info->sub_block_index |= dev_mask;
ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
ras_cmd->ras_in_message.trigger_error = *info;
ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
if (ret)
return -EINVAL;
/* If err_event_athub occurs, the error injection was successful; however,
* the return status from the TA is no longer reliable
*/
if (amdgpu_ras_intr_triggered())
return 0;
if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
return -EACCES;
else if (ras_cmd->ras_status)
return -EINVAL;
return 0;
}
// ras end
// HDCP start
static int psp_hdcp_initialize(struct psp_context *psp)
{
int ret;
/*
* TODO: bypass the initialize in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->hdcp_context.context.bin_desc.size_bytes ||
!psp->hdcp_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->hdcp_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
if (ret)
return ret;
}
ret = psp_ta_load(psp, &psp->hdcp_context.context);
if (!ret) {
psp->hdcp_context.context.initialized = true;
mutex_init(&psp->hdcp_context.mutex);
}
return ret;
}
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}
static int psp_hdcp_terminate(struct psp_context *psp)
{
int ret;
/*
* TODO: bypass the terminate in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->hdcp_context.context.initialized)
return 0;
ret = psp_ta_unload(psp, &psp->hdcp_context.context);
psp->hdcp_context.context.initialized = false;
return ret;
}
// HDCP end
// DTM start
static int psp_dtm_initialize(struct psp_context *psp)
{
int ret;
/*
* TODO: bypass the initialize in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->dtm_context.context.bin_desc.size_bytes ||
!psp->dtm_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->dtm_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
if (ret)
return ret;
}
ret = psp_ta_load(psp, &psp->dtm_context.context);
if (!ret) {
psp->dtm_context.context.initialized = true;
mutex_init(&psp->dtm_context.mutex);
}
return ret;
}
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}
static int psp_dtm_terminate(struct psp_context *psp)
{
int ret;
/*
* TODO: bypass the terminate in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->dtm_context.context.initialized)
return 0;
ret = psp_ta_unload(psp, &psp->dtm_context.context);
psp->dtm_context.context.initialized = false;
return ret;
}
// DTM end
// RAP start
static int psp_rap_initialize(struct psp_context *psp)
{
int ret;
enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
/*
* TODO: bypass the initialize in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->rap_context.context.bin_desc.size_bytes ||
!psp->rap_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
return 0;
}
psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->rap_context.context.mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
if (ret)
return ret;
}
ret = psp_ta_load(psp, &psp->rap_context.context);
if (!ret) {
psp->rap_context.context.initialized = true;
mutex_init(&psp->rap_context.mutex);
} else
return ret;
ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
if (ret || status != TA_RAP_STATUS__SUCCESS) {
psp_rap_terminate(psp);
/* free rap shared memory */
psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
ret, status);
return ret;
}
return 0;
}
static int psp_rap_terminate(struct psp_context *psp)
{
int ret;
if (!psp->rap_context.context.initialized)
return 0;
ret = psp_ta_unload(psp, &psp->rap_context.context);
psp->rap_context.context.initialized = false;
return ret;
}
int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
{
struct ta_rap_shared_memory *rap_cmd;
int ret = 0;
if (!psp->rap_context.context.initialized)
return 0;
if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
return -EINVAL;
mutex_lock(&psp->rap_context.mutex);
rap_cmd = (struct ta_rap_shared_memory *)
psp->rap_context.context.mem_context.shared_buf;
memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
rap_cmd->cmd_id = ta_cmd_id;
rap_cmd->validation_method_id = METHOD_A;
ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
if (ret)
goto out_unlock;
if (status)
*status = rap_cmd->rap_status;
out_unlock:
mutex_unlock(&psp->rap_context.mutex);
return ret;
}
// RAP end
/* securedisplay start */
static int psp_securedisplay_initialize(struct psp_context *psp)
{
int ret;
struct ta_securedisplay_cmd *securedisplay_cmd;
/*
* TODO: bypass the initialize in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
return 0;
}
psp->securedisplay_context.context.mem_context.shared_mem_size =
PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
if (!psp->securedisplay_context.context.initialized) {
ret = psp_ta_init_shared_buf(psp,
&psp->securedisplay_context.context.mem_context);
if (ret)
return ret;
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
if (!ret) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else
return ret;
mutex_lock(&psp->securedisplay_context.mutex);
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__QUERY_TA);
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
mutex_unlock(&psp->securedisplay_context.mutex);
if (ret) {
psp_securedisplay_terminate(psp);
/* free securedisplay shared memory */
psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
return -EINVAL;
}
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
/* don't try again */
psp->securedisplay_context.context.bin_desc.size_bytes = 0;
}
return 0;
}
static int psp_securedisplay_terminate(struct psp_context *psp)
{
int ret;
/*
* TODO: bypass the terminate in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->securedisplay_context.context.initialized)
return 0;
ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
psp->securedisplay_context.context.initialized = false;
return ret;
}
int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
int ret;
if (!psp->securedisplay_context.context.initialized)
return -EINVAL;
if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
return -EINVAL;
ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
return ret;
}
/* SECUREDISPLAY end */
int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
{
struct psp_context *psp = &adev->psp;
int ret = 0;
if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
ret = psp->funcs->wait_for_bootloader(psp);
return ret;
}
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
if (!amdgpu_sriov_vf(adev)) {
if ((is_psp_fw_valid(psp->kdb)) &&
(psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp);
if (ret) {
DRM_ERROR("PSP load kdb failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->spl)) &&
(psp->funcs->bootloader_load_spl != NULL)) {
ret = psp_bootloader_load_spl(psp);
if (ret) {
DRM_ERROR("PSP load spl failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->sys)) &&
(psp->funcs->bootloader_load_sysdrv != NULL)) {
ret = psp_bootloader_load_sysdrv(psp);
if (ret) {
DRM_ERROR("PSP load sys drv failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->soc_drv)) &&
(psp->funcs->bootloader_load_soc_drv != NULL)) {
ret = psp_bootloader_load_soc_drv(psp);
if (ret) {
DRM_ERROR("PSP load soc drv failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->intf_drv)) &&
(psp->funcs->bootloader_load_intf_drv != NULL)) {
ret = psp_bootloader_load_intf_drv(psp);
if (ret) {
DRM_ERROR("PSP load intf drv failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->dbg_drv)) &&
(psp->funcs->bootloader_load_dbg_drv != NULL)) {
ret = psp_bootloader_load_dbg_drv(psp);
if (ret) {
DRM_ERROR("PSP load dbg drv failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->ras_drv)) &&
(psp->funcs->bootloader_load_ras_drv != NULL)) {
ret = psp_bootloader_load_ras_drv(psp);
if (ret) {
DRM_ERROR("PSP load ras_drv failed!\n");
return ret;
}
}
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
if (ret) {
DRM_ERROR("PSP load sos failed!\n");
return ret;
}
}
}
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP create ring failed!\n");
return ret;
}
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
if (!psp_boottime_tmr(psp)) {
ret = psp_tmr_init(psp);
if (ret) {
DRM_ERROR("PSP tmr init failed!\n");
return ret;
}
}
skip_pin_bo:
/*
* For ASICs with DF Cstate management centralized
* to PMFW, TMR setup should be performed after PMFW is
* loaded and before other non-psp firmware is loaded.
*/
if (psp->pmfw_centralized_cstate_management) {
ret = psp_load_smu_fw(psp);
if (ret)
return ret;
}
ret = psp_tmr_load(psp);
if (ret) {
DRM_ERROR("PSP load tmr failed!\n");
return ret;
}
return 0;
}
static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
enum psp_gfx_fw_type *type)
{
switch (ucode->ucode_id) {
case AMDGPU_UCODE_ID_CAP:
*type = GFX_FW_TYPE_CAP;
break;
case AMDGPU_UCODE_ID_SDMA0:
*type = GFX_FW_TYPE_SDMA0;
break;
case AMDGPU_UCODE_ID_SDMA1:
*type = GFX_FW_TYPE_SDMA1;
break;
case AMDGPU_UCODE_ID_SDMA2:
*type = GFX_FW_TYPE_SDMA2;
break;
case AMDGPU_UCODE_ID_SDMA3:
*type = GFX_FW_TYPE_SDMA3;
break;
case AMDGPU_UCODE_ID_SDMA4:
*type = GFX_FW_TYPE_SDMA4;
break;
case AMDGPU_UCODE_ID_SDMA5:
*type = GFX_FW_TYPE_SDMA5;
break;
case AMDGPU_UCODE_ID_SDMA6:
*type = GFX_FW_TYPE_SDMA6;
break;
case AMDGPU_UCODE_ID_SDMA7:
*type = GFX_FW_TYPE_SDMA7;
break;
case AMDGPU_UCODE_ID_CP_MES:
*type = GFX_FW_TYPE_CP_MES;
break;
case AMDGPU_UCODE_ID_CP_MES_DATA:
*type = GFX_FW_TYPE_MES_STACK;
break;
case AMDGPU_UCODE_ID_CP_MES1:
*type = GFX_FW_TYPE_CP_MES_KIQ;
break;
case AMDGPU_UCODE_ID_CP_MES1_DATA:
*type = GFX_FW_TYPE_MES_KIQ_STACK;
break;
case AMDGPU_UCODE_ID_CP_CE:
*type = GFX_FW_TYPE_CP_CE;
break;
case AMDGPU_UCODE_ID_CP_PFP:
*type = GFX_FW_TYPE_CP_PFP;
break;
case AMDGPU_UCODE_ID_CP_ME:
*type = GFX_FW_TYPE_CP_ME;
break;
case AMDGPU_UCODE_ID_CP_MEC1:
*type = GFX_FW_TYPE_CP_MEC;
break;
case AMDGPU_UCODE_ID_CP_MEC1_JT:
*type = GFX_FW_TYPE_CP_MEC_ME1;
break;
case AMDGPU_UCODE_ID_CP_MEC2:
*type = GFX_FW_TYPE_CP_MEC;
break;
case AMDGPU_UCODE_ID_CP_MEC2_JT:
*type = GFX_FW_TYPE_CP_MEC_ME2;
break;
case AMDGPU_UCODE_ID_RLC_P:
*type = GFX_FW_TYPE_RLC_P;
break;
case AMDGPU_UCODE_ID_RLC_V:
*type = GFX_FW_TYPE_RLC_V;
break;
case AMDGPU_UCODE_ID_RLC_G:
*type = GFX_FW_TYPE_RLC_G;
break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
break;
case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
*type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
break;
case AMDGPU_UCODE_ID_RLC_IRAM:
*type = GFX_FW_TYPE_RLC_IRAM;
break;
case AMDGPU_UCODE_ID_RLC_DRAM:
*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
break;
case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
break;
case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
break;
case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
break;
case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
break;
case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
break;
case AMDGPU_UCODE_ID_SMC:
*type = GFX_FW_TYPE_SMU;
break;
case AMDGPU_UCODE_ID_PPTABLE:
*type = GFX_FW_TYPE_PPTABLE;
break;
case AMDGPU_UCODE_ID_UVD:
*type = GFX_FW_TYPE_UVD;
break;
case AMDGPU_UCODE_ID_UVD1:
*type = GFX_FW_TYPE_UVD1;
break;
case AMDGPU_UCODE_ID_VCE:
*type = GFX_FW_TYPE_VCE;
break;
case AMDGPU_UCODE_ID_VCN:
*type = GFX_FW_TYPE_VCN;
break;
case AMDGPU_UCODE_ID_VCN1:
*type = GFX_FW_TYPE_VCN1;
break;
case AMDGPU_UCODE_ID_DMCU_ERAM:
*type = GFX_FW_TYPE_DMCU_ERAM;
break;
case AMDGPU_UCODE_ID_DMCU_INTV:
*type = GFX_FW_TYPE_DMCU_ISR;
break;
case AMDGPU_UCODE_ID_VCN0_RAM:
*type = GFX_FW_TYPE_VCN0_RAM;
break;
case AMDGPU_UCODE_ID_VCN1_RAM:
*type = GFX_FW_TYPE_VCN1_RAM;
break;
case AMDGPU_UCODE_ID_DMCUB:
*type = GFX_FW_TYPE_DMUB;
break;
case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
*type = GFX_FW_TYPE_SDMA_UCODE_TH0;
break;
case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
*type = GFX_FW_TYPE_SDMA_UCODE_TH1;
break;
case AMDGPU_UCODE_ID_IMU_I:
*type = GFX_FW_TYPE_IMU_I;
break;
case AMDGPU_UCODE_ID_IMU_D:
*type = GFX_FW_TYPE_IMU_D;
break;
case AMDGPU_UCODE_ID_CP_RS64_PFP:
*type = GFX_FW_TYPE_RS64_PFP;
break;
case AMDGPU_UCODE_ID_CP_RS64_ME:
*type = GFX_FW_TYPE_RS64_ME;
break;
case AMDGPU_UCODE_ID_CP_RS64_MEC:
*type = GFX_FW_TYPE_RS64_MEC;
break;
case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
*type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
break;
case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
*type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
break;
case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
*type = GFX_FW_TYPE_RS64_ME_P0_STACK;
break;
case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
*type = GFX_FW_TYPE_RS64_ME_P1_STACK;
break;
case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
*type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
break;
case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
*type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
break;
case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
*type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
break;
case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
*type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
}
return 0;
}
static void psp_print_fw_hdr(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
struct amdgpu_device *adev = psp->adev;
struct common_firmware_header *hdr;
switch (ucode->ucode_id) {
case AMDGPU_UCODE_ID_SDMA0:
case AMDGPU_UCODE_ID_SDMA1:
case AMDGPU_UCODE_ID_SDMA2:
case AMDGPU_UCODE_ID_SDMA3:
case AMDGPU_UCODE_ID_SDMA4:
case AMDGPU_UCODE_ID_SDMA5:
case AMDGPU_UCODE_ID_SDMA6:
case AMDGPU_UCODE_ID_SDMA7:
hdr = (struct common_firmware_header *)
adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
amdgpu_ucode_print_sdma_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_CE:
hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_PFP:
hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_ME:
hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_CP_MEC1:
hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(hdr);
break;
case AMDGPU_UCODE_ID_RLC_G:
hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(hdr);
break;
case AMDGPU_UCODE_ID_SMC:
hdr = (struct common_firmware_header *)adev->pm.fw->data;
amdgpu_ucode_print_smc_hdr(hdr);
break;
default:
break;
}
}
static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd)
{
int ret;
uint64_t fw_mem_mc_addr = ucode->mc_addr;
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
if (ret)
DRM_ERROR("Unknown firmware type\n");
return ret;
}
int psp_execute_ip_fw_load(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
int ret = 0;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
if (!ret) {
ret = psp_cmd_submit_buf(psp, ucode, cmd,
psp->fence_buf_mc_addr);
}
release_psp_cmd_buf(psp);
return ret;
}
static int psp_load_smu_fw(struct psp_context *psp)
{
int ret;
struct amdgpu_device *adev = psp->adev;
struct amdgpu_firmware_info *ucode =
&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
struct amdgpu_ras *ras = psp->ras_context.ras;
/*
* Skip SMU FW reloading in case of using BACO for runpm only,
* as SMU is always alive.
*/
if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
return 0;
if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
return 0;
if ((amdgpu_in_reset(adev) &&
ras && adev->ras_enabled &&
(adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret)
DRM_WARN("Failed to set MP1 state prepare for reload\n");
}
ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
DRM_ERROR("PSP load smu failed!\n");
return ret;
}
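/*
 * Decide whether a firmware image should be skipped by the PSP front-door
 * loader: the image is empty, the SMC is handled elsewhere (autoload or
 * PMFW-centralized cstate management), the function is an SR-IOV VF, or the
 * MEC JT images are covered by RLC autoload.
 */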
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
(psp_smu_reload_quirk(psp) ||
psp->autoload_supported ||
psp->pmfw_centralized_cstate_management))
return true;
if (amdgpu_sriov_vf(psp->adev) &&
amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
return true;
if (psp->autoload_supported &&
(ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
/* skip mec JT when autoload is enabled */
return true;
return false;
}
int psp_load_fw_list(struct psp_context *psp,
struct amdgpu_firmware_info **ucode_list, int ucode_count)
{
int ret = 0, i;
struct amdgpu_firmware_info *ucode;
for (i = 0; i < ucode_count; ++i) {
ucode = ucode_list[i];
psp_print_fw_hdr(psp, ucode);
ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
return ret;
}
return ret;
}
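/*
 * Walk adev->firmware.ucode[] and load every non-PSP firmware image through
 * the PSP, loading the SMC first when required and kicking off RLC autoload
 * once the last GFX image has been handed to the PSP.
 */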
static int psp_load_non_psp_fw(struct psp_context *psp)
{
int i, ret;
struct amdgpu_firmware_info *ucode;
struct amdgpu_device *adev = psp->adev;
if (psp->autoload_supported &&
!psp->pmfw_centralized_cstate_management) {
ret = psp_load_smu_fw(psp);
if (ret)
return ret;
}
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
!fw_load_skip_check(psp, ucode)) {
ret = psp_load_smu_fw(psp);
if (ret)
return ret;
continue;
}
if (fw_load_skip_check(psp, ucode))
continue;
if (psp->autoload_supported &&
(adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) &&
(ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
			/* PSP only receives one SDMA firmware for sienna_cichlid,
			 * as all four SDMA firmware images are the same.
			 */
continue;
psp_print_fw_hdr(psp, ucode);
ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
return ret;
		/* Start RLC autoload after PSP has received all the GFX firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
return ret;
}
}
}
return 0;
}
static int psp_load_fw(struct amdgpu_device *adev)
{
int ret;
struct psp_context *psp = &adev->psp;
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
/* should not destroy ring, only stop */
psp_ring_stop(psp, PSP_RING_TYPE__KM);
} else {
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring init failed!\n");
goto failed;
}
}
ret = psp_hw_start(psp);
if (ret)
goto failed;
ret = psp_load_non_psp_fw(psp);
if (ret)
goto failed1;
ret = psp_asd_initialize(psp);
if (ret) {
DRM_ERROR("PSP load asd failed!\n");
goto failed1;
}
ret = psp_rl_load(adev);
if (ret) {
DRM_ERROR("PSP load RL failed!\n");
goto failed1;
}
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp, false, true);
			/* Warn on XGMI session initialization failure
			 * instead of stopping driver initialization.
			 */
if (ret)
dev_err(psp->adev->dev,
"XGMI: Failed to initialize XGMI session\n");
}
}
if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"RAS: Failed to initialize RAS\n");
ret = psp_hdcp_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"HDCP: Failed to initialize HDCP\n");
ret = psp_dtm_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n");
ret = psp_rap_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
ret = psp_securedisplay_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
return 0;
failed1:
psp_free_shared_bufs(psp);
failed:
	/*
	 * All cleanup jobs (xgmi terminate, ras terminate,
	 * ring destroy, cmd/fence/fw buffers destroy,
	 * psp->cmd destroy) are delayed to psp_hw_fini.
	 */
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
return ret;
}
static int psp_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
mutex_lock(&adev->firmware.mutex);
/*
* This sequence is just used on hw_init only once, no need on
* resume.
*/
ret = amdgpu_ucode_init_bo(adev);
if (ret)
goto failed;
ret = psp_load_fw(adev);
if (ret) {
DRM_ERROR("PSP firmware loading failed\n");
goto failed;
}
mutex_unlock(&adev->firmware.mutex);
return 0;
failed:
adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
mutex_unlock(&adev->firmware.mutex);
return -EINVAL;
}
static int psp_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
if (psp->ta_fw) {
psp_ras_terminate(psp);
psp_securedisplay_terminate(psp);
psp_rap_terminate(psp);
psp_dtm_terminate(psp);
psp_hdcp_terminate(psp);
if (adev->gmc.xgmi.num_physical_nodes > 1)
psp_xgmi_terminate(psp);
}
psp_asd_terminate(psp);
psp_tmr_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
return 0;
}
static int psp_suspend(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
if (adev->gmc.xgmi.num_physical_nodes > 1 &&
psp->xgmi_context.context.initialized) {
ret = psp_xgmi_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate xgmi ta\n");
goto out;
}
}
if (psp->ta_fw) {
ret = psp_ras_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate ras ta\n");
goto out;
}
ret = psp_hdcp_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate hdcp ta\n");
goto out;
}
ret = psp_dtm_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate dtm ta\n");
goto out;
}
ret = psp_rap_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate rap ta\n");
goto out;
}
ret = psp_securedisplay_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate securedisplay ta\n");
goto out;
}
}
ret = psp_asd_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate asd\n");
goto out;
}
ret = psp_tmr_terminate(psp);
if (ret) {
DRM_ERROR("Failed to terminate tmr\n");
goto out;
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret)
DRM_ERROR("PSP ring stop failed\n");
out:
return ret;
}
static int psp_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
DRM_INFO("PSP is resuming...\n");
if (psp->mem_train_ctx.enable_mem_training) {
ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
if (ret) {
DRM_ERROR("Failed to process memory training!\n");
return ret;
}
}
mutex_lock(&adev->firmware.mutex);
ret = psp_hw_start(psp);
if (ret)
goto failed;
ret = psp_load_non_psp_fw(psp);
if (ret)
goto failed;
ret = psp_asd_initialize(psp);
if (ret) {
DRM_ERROR("PSP load asd failed!\n");
goto failed;
}
ret = psp_rl_load(adev);
if (ret) {
dev_err(adev->dev, "PSP load RL failed!\n");
goto failed;
}
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp, false, true);
		/* Warn on XGMI session initialization failure
		 * instead of stopping driver initialization.
		 */
if (ret)
dev_err(psp->adev->dev,
"XGMI: Failed to initialize XGMI session\n");
}
if (psp->ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"RAS: Failed to initialize RAS\n");
ret = psp_hdcp_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"HDCP: Failed to initialize HDCP\n");
ret = psp_dtm_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"DTM: Failed to initialize DTM\n");
ret = psp_rap_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"RAP: Failed to initialize RAP\n");
ret = psp_securedisplay_initialize(psp);
if (ret)
dev_err(psp->adev->dev,
"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
}
mutex_unlock(&adev->firmware.mutex);
return 0;
failed:
DRM_ERROR("PSP resume failed\n");
mutex_unlock(&adev->firmware.mutex);
return ret;
}
int psp_gpu_reset(struct amdgpu_device *adev)
{
int ret;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
mutex_lock(&adev->psp.mutex);
ret = psp_mode1_reset(&adev->psp);
mutex_unlock(&adev->psp.mutex);
return ret;
}
int psp_rlc_autoload_start(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
release_psp_cmd_buf(psp);
return ret;
}
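/*
 * Write one GPCOM ring buffer frame (command buffer address, fence address
 * and fence value) at the current write pointer of the PSP KM ring, then
 * advance the write pointer by one frame size (in DWORDs).
 */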
int psp_ring_cmd_submit(struct psp_context *psp,
uint64_t cmd_buf_mc_addr,
uint64_t fence_mc_addr,
int index)
{
unsigned int psp_write_ptr_reg = 0;
struct psp_gfx_rb_frame *write_frame;
struct psp_ring *ring = &psp->km_ring;
struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
struct amdgpu_device *adev = psp->adev;
uint32_t ring_size_dw = ring->ring_size / 4;
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
psp_write_ptr_reg = psp_ring_get_wptr(psp);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
/* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
if ((psp_write_ptr_reg % ring_size_dw) == 0)
write_frame = ring_buffer_start;
else
write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
/* Check invalid write_frame ptr address */
if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
ring_buffer_start, ring_buffer_end, write_frame);
DRM_ERROR("write_frame is pointing to address out of bounds\n");
return -EINVAL;
}
/* Initialize KM RB frame */
memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
/* Update KM RB frame */
write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
write_frame->fence_value = index;
amdgpu_device_flush_hdp(adev, NULL);
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
psp_ring_set_wptr(psp, psp_write_ptr_reg);
return 0;
}
int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
if (err)
goto out;
asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:
amdgpu_ucode_release(&adev->psp.asd_fw);
return err;
}
int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
if (err)
goto out;
toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
return 0;
out:
amdgpu_ucode_release(&adev->psp.toc_fw);
return err;
}
static int parse_sos_bin_descriptor(struct psp_context *psp,
const struct psp_fw_bin_desc *desc,
const struct psp_firmware_header_v2_0 *sos_hdr)
{
uint8_t *ucode_start_addr = NULL;
if (!psp || !desc || !sos_hdr)
return -EINVAL;
ucode_start_addr = (uint8_t *)sos_hdr +
le32_to_cpu(desc->offset_bytes) +
le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
switch (desc->fw_type) {
case PSP_FW_TYPE_PSP_SOS:
psp->sos.fw_version = le32_to_cpu(desc->fw_version);
psp->sos.feature_version = le32_to_cpu(desc->fw_version);
psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
psp->sos.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_SYS_DRV:
psp->sys.fw_version = le32_to_cpu(desc->fw_version);
psp->sys.feature_version = le32_to_cpu(desc->fw_version);
psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
psp->sys.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_KDB:
psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
psp->kdb.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_TOC:
psp->toc.fw_version = le32_to_cpu(desc->fw_version);
psp->toc.feature_version = le32_to_cpu(desc->fw_version);
psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->toc.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_SPL:
psp->spl.fw_version = le32_to_cpu(desc->fw_version);
psp->spl.feature_version = le32_to_cpu(desc->fw_version);
psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
psp->spl.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_RL:
psp->rl.fw_version = le32_to_cpu(desc->fw_version);
psp->rl.feature_version = le32_to_cpu(desc->fw_version);
psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
psp->rl.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_SOC_DRV:
psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->soc_drv.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_INTF_DRV:
psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->intf_drv.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_DBG_DRV:
psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->dbg_drv.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_RAS_DRV:
psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ras_drv.start_addr = ucode_start_addr;
break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
}
return 0;
}
static int psp_init_sos_base_fw(struct amdgpu_device *adev)
{
const struct psp_firmware_header_v1_0 *sos_hdr;
const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
uint8_t *ucode_array_start_addr;
sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
ucode_array_start_addr = (uint8_t *)sos_hdr +
le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
if (adev->gmc.xgmi.connected_to_cpu ||
(adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) {
adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
adev->psp.sys.start_addr = ucode_array_start_addr;
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr->sos.offset_bytes);
} else {
/* Load alternate PSP SOS FW */
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
adev->psp.sys.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
adev->psp.sos.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
}
if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
dev_warn(adev->dev, "PSP SOS FW not available");
return -EINVAL;
}
return 0;
}
int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *sos_hdr;
const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
int err = 0;
uint8_t *ucode_array_start_addr;
int fw_index = 0;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
if (err)
goto out;
sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
ucode_array_start_addr = (uint8_t *)sos_hdr +
le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
switch (sos_hdr->header.header_version_major) {
case 1:
err = psp_init_sos_base_fw(adev);
if (err)
goto out;
if (sos_hdr->header.header_version_minor == 1) {
sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
}
if (sos_hdr->header.header_version_minor == 2) {
sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
}
if (sos_hdr->header.header_version_minor == 3) {
sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
adev->psp.toc.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
adev->psp.kdb.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
adev->psp.spl.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
adev->psp.rl.start_addr = ucode_array_start_addr +
le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
}
break;
case 2:
sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
err = -EINVAL;
goto out;
}
for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
err = parse_sos_bin_descriptor(psp,
&sos_hdr_v2_0->psp_fw_bin[fw_index],
sos_hdr_v2_0);
if (err)
goto out;
}
break;
default:
dev_err(adev->dev,
"unsupported psp sos firmware\n");
err = -EINVAL;
goto out;
}
return 0;
out:
amdgpu_ucode_release(&adev->psp.sos_fw);
return err;
}
static int parse_ta_bin_descriptor(struct psp_context *psp,
const struct psp_fw_bin_desc *desc,
const struct ta_firmware_header_v2_0 *ta_hdr)
{
uint8_t *ucode_start_addr = NULL;
if (!psp || !desc || !ta_hdr)
return -EINVAL;
ucode_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(desc->offset_bytes) +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->asd_context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAS:
psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_HDCP:
psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_DTM:
psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAP:
psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
psp->securedisplay_context.context.bin_desc.fw_version =
le32_to_cpu(desc->fw_version);
psp->securedisplay_context.context.bin_desc.size_bytes =
le32_to_cpu(desc->size_bytes);
psp->securedisplay_context.context.bin_desc.start_addr =
ucode_start_addr;
break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
break;
}
return 0;
}
static int parse_ta_v1_microcode(struct psp_context *psp)
{
const struct ta_firmware_header_v1_0 *ta_hdr;
struct amdgpu_device *adev = psp->adev;
ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
return -EINVAL;
adev->psp.xgmi_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->xgmi.fw_version);
adev->psp.xgmi_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->xgmi.size_bytes);
adev->psp.xgmi_context.context.bin_desc.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ras_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->ras.fw_version);
adev->psp.ras_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->ras.size_bytes);
adev->psp.ras_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->ras.offset_bytes);
adev->psp.hdcp_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->hdcp.fw_version);
adev->psp.hdcp_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->hdcp.size_bytes);
adev->psp.hdcp_context.context.bin_desc.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.dtm_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->dtm.fw_version);
adev->psp.dtm_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->dtm.size_bytes);
adev->psp.dtm_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
adev->psp.securedisplay_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->securedisplay.fw_version);
adev->psp.securedisplay_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->securedisplay.size_bytes);
adev->psp.securedisplay_context.context.bin_desc.start_addr =
(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
return 0;
}
static int parse_ta_v2_microcode(struct psp_context *psp)
{
const struct ta_firmware_header_v2_0 *ta_hdr;
struct amdgpu_device *adev = psp->adev;
int err = 0;
int ta_index = 0;
ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
return -EINVAL;
if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
return -EINVAL;
}
for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
err = parse_ta_bin_descriptor(psp,
&ta_hdr->ta_fw_bin[ta_index],
ta_hdr);
if (err)
return err;
}
return 0;
}
int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
{
const struct common_firmware_header *hdr;
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
int err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
if (err)
return err;
hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
switch (le16_to_cpu(hdr->header_version_major)) {
case 1:
err = parse_ta_v1_microcode(psp);
break;
case 2:
err = parse_ta_v2_microcode(psp);
break;
default:
dev_err(adev->dev, "unsupported TA header version\n");
err = -EINVAL;
}
if (err)
amdgpu_ucode_release(&adev->psp.ta_fw);
return err;
}
int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
struct amdgpu_firmware_info *info = NULL;
int err = 0;
if (!amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
if (err) {
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
err = 0;
goto out;
}
dev_err(adev->dev, "fail to initialize cap microcode\n");
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
info->ucode_id = AMDGPU_UCODE_ID_CAP;
info->fw = adev->psp.cap_fw;
cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
adev->psp.cap_fw->data;
adev->firmware.fw_size += ALIGN(
le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
return 0;
out:
amdgpu_ucode_release(&adev->psp.cap_fw);
return err;
}
static int psp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int psp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t fw_ver;
int ret;
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
DRM_INFO("PSP block is not ready yet.");
return -EBUSY;
}
mutex_lock(&adev->psp.mutex);
ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
mutex_unlock(&adev->psp.mutex);
if (ret) {
DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
return ret;
}
return sysfs_emit(buf, "%x\n", fw_ver);
}
static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret, idx;
char fw_name[100];
const struct firmware *usbc_pd_fw;
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
void *fw_pri_cpu_addr;
if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
DRM_INFO("PSP block is not ready yet.");
return -EBUSY;
}
if (!drm_dev_enter(ddev, &idx))
return -ENODEV;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
if (ret)
goto fail;
/* LFB address which is aligned to 1MB boundary per PSP request */
ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&fw_buf_bo, &fw_pri_mc_addr,
&fw_pri_cpu_addr);
if (ret)
goto rel_buf;
memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
mutex_lock(&adev->psp.mutex);
ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
mutex_unlock(&adev->psp.mutex);
amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
rel_buf:
release_firmware(usbc_pd_fw);
fail:
if (ret) {
DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
count = ret;
}
drm_dev_exit(idx);
return count;
}
void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
int idx;
if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
return;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
memcpy(psp->fw_pri_buf, start_addr, bin_size);
drm_dev_exit(idx);
}
/**
* DOC: usbc_pd_fw
* Reading from this file will retrieve the USB-C PD firmware version. Writing to
* this file will trigger the update process.
*/
static DEVICE_ATTR(usbc_pd_fw, 0644,
psp_usbc_pd_fw_sysfs_read,
psp_usbc_pd_fw_sysfs_write);
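/*
 * Rough sketch of userspace interaction with the attribute above; the card
 * index and firmware file name are hypothetical, and the written name is
 * resolved relative to the firmware "amdgpu/" directory:
 *
 *   cat /sys/class/drm/card0/device/usbc_pd_fw
 *   echo usbc_pd.bin > /sys/class/drm/card0/device/usbc_pd_fw
 */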
int is_psp_fw_valid(struct psp_bin_desc bin)
{
return bin.size_bytes;
}
static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
adev->psp.vbflash_done = false;
/* Safeguard against memory drain */
if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
kvfree(adev->psp.vbflash_tmp_buf);
adev->psp.vbflash_tmp_buf = NULL;
adev->psp.vbflash_image_size = 0;
return -ENOMEM;
}
/* TODO Just allocate max for now and optimize to realloc later if needed */
if (!adev->psp.vbflash_tmp_buf) {
adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
if (!adev->psp.vbflash_tmp_buf)
return -ENOMEM;
}
mutex_lock(&adev->psp.mutex);
memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
adev->psp.vbflash_image_size += count;
mutex_unlock(&adev->psp.mutex);
dev_dbg(adev->dev, "IFWI staged for update");
return count;
}
static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct amdgpu_bo *fw_buf_bo = NULL;
uint64_t fw_pri_mc_addr;
void *fw_pri_cpu_addr;
int ret;
if (adev->psp.vbflash_image_size == 0)
return -EINVAL;
dev_dbg(adev->dev, "PSP IFWI flash process initiated");
ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&fw_buf_bo,
&fw_pri_mc_addr,
&fw_pri_cpu_addr);
if (ret)
goto rel_buf;
memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
mutex_lock(&adev->psp.mutex);
ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
mutex_unlock(&adev->psp.mutex);
amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
rel_buf:
kvfree(adev->psp.vbflash_tmp_buf);
adev->psp.vbflash_tmp_buf = NULL;
adev->psp.vbflash_image_size = 0;
if (ret) {
dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
return ret;
}
dev_dbg(adev->dev, "PSP IFWI flash process done");
return 0;
}
/**
* DOC: psp_vbflash
* Writing to this file will stage an IFWI for update. Reading from this file
* will trigger the update process.
*/
static struct bin_attribute psp_vbflash_bin_attr = {
.attr = {.name = "psp_vbflash", .mode = 0660},
.size = 0,
.write = amdgpu_psp_vbflash_write,
.read = amdgpu_psp_vbflash_read,
};
/**
* DOC: psp_vbflash_status
* The status of the flash process.
* 0: IFWI flash not complete.
* 1: IFWI flash complete.
*/
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t vbflash_status;
vbflash_status = psp_vbflash_status(&adev->psp);
if (!adev->psp.vbflash_done)
vbflash_status = 0;
else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
vbflash_status = 1;
return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
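/*
 * Sketch of the expected IFWI update flow through the two attributes above
 * (the card index and image name are hypothetical, and the attributes are
 * only visible when IFWI update is supported):
 *
 *   cat ifwi.bin > /sys/class/drm/card0/device/psp_vbflash     # stage image
 *   cat /sys/class/drm/card0/device/psp_vbflash                # trigger flash
 *   cat /sys/class/drm/card0/device/psp_vbflash_status         # poll result
 */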
static struct bin_attribute *bin_flash_attrs[] = {
&psp_vbflash_bin_attr,
NULL
};
static struct attribute *flash_attrs[] = {
&dev_attr_psp_vbflash_status.attr,
&dev_attr_usbc_pd_fw.attr,
NULL
};
static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
if (attr == &dev_attr_usbc_pd_fw.attr)
return adev->psp.sup_pd_fw_up ? 0660 : 0;
return adev->psp.sup_ifwi_up ? 0440 : 0;
}
static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
struct bin_attribute *attr,
int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
return adev->psp.sup_ifwi_up ? 0660 : 0;
}
const struct attribute_group amdgpu_flash_attr_group = {
.attrs = flash_attrs,
.bin_attrs = bin_flash_attrs,
.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
.is_visible = amdgpu_flash_attr_is_visible,
};
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
.late_init = NULL,
.sw_init = psp_sw_init,
.sw_fini = psp_sw_fini,
.hw_init = psp_hw_init,
.hw_fini = psp_hw_fini,
.suspend = psp_suspend,
.resume = psp_resume,
.is_idle = NULL,
.check_soft_reset = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 3,
.minor = 1,
.rev = 0,
.funcs = &psp_ip_funcs,
};
const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 10,
.minor = 0,
.rev = 0,
.funcs = &psp_ip_funcs,
};
const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 11,
.minor = 0,
.rev = 0,
.funcs = &psp_ip_funcs,
};
const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 11,
.minor = 0,
.rev = 8,
.funcs = &psp_ip_funcs,
};
const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 12,
.minor = 0,
.rev = 0,
.funcs = &psp_ip_funcs,
};
const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 13,
.minor = 0,
.rev = 0,
.funcs = &psp_ip_funcs,
};
const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 13,
.minor = 0,
.rev = 4,
.funcs = &psp_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include "amdgpu.h"
#include "lsdma_v6_0.h"
#include "amdgpu_lsdma.h"
#include "lsdma/lsdma_6_0_0_offset.h"
#include "lsdma/lsdma_6_0_0_sh_mask.h"
static int lsdma_v6_0_wait_pio_status(struct amdgpu_device *adev)
{
return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS),
LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK,
LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK);
}
static int lsdma_v6_0_copy_mem(struct amdgpu_device *adev,
uint64_t src_addr,
uint64_t dst_addr,
uint64_t size)
{
int ret;
uint32_t tmp;
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0);
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
ret = lsdma_v6_0_wait_pio_status(adev);
if (ret)
dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n");
return ret;
}
static int lsdma_v6_0_fill_mem(struct amdgpu_device *adev,
uint64_t dst_addr,
uint32_t data,
uint64_t size)
{
int ret;
uint32_t tmp;
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data);
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, BYTE_COUNT, size);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_LOCATION, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, SRC_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, DST_ADDR_INC, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, OVERLAP_DISABLE, 0);
tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1);
WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
ret = lsdma_v6_0_wait_pio_status(adev);
if (ret)
dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n");
return ret;
}
static void lsdma_v6_0_update_memory_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t tmp;
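	/* Clear MEM_POWER_CTRL_EN first, then program the requested state,
	 * presumably so the new setting is re-latched by the hardware.
	 */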
tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL);
tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, 0);
WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
tmp = REG_SET_FIELD(tmp, LSDMA_MEM_POWER_CTRL, MEM_POWER_CTRL_EN, enable);
WREG32_SOC15(LSDMA, 0, regLSDMA_MEM_POWER_CTRL, tmp);
}
const struct amdgpu_lsdma_funcs lsdma_v6_0_funcs = {
.copy_mem = lsdma_v6_0_copy_mem,
.fill_mem = lsdma_v6_0_fill_mem,
.update_memory_power_gating = lsdma_v6_0_update_memory_power_gating
};
| linux-master | drivers/gpu/drm/amd/amdgpu/lsdma_v6_0.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*
*/
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "xgmi/xgmi_6_1_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"
#include "amdgpu_reset.h"
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218
static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
static LIST_HEAD(xgmi_hive_list);
static const int xgmi_pcs_err_status_reg_vg20[] = {
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};
static const int wafl_pcs_err_status_reg_vg20[] = {
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};
static const int xgmi_pcs_err_status_reg_arct[] = {
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};
/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};
static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_STATUS,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
};
static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
};
static const int walf_pcs_err_status_reg_aldebaran[] = {
smnPCS_GOPX1_PCS_ERROR_STATUS,
smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
};
static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};
static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
{"XGMI PCS DataLossErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
{"XGMI PCS TrainingErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
{"XGMI PCS CRCErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
{"XGMI PCS BERExceededErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
{"XGMI PCS TxMetaDataErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
{"XGMI PCS ReplayBufParityErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
{"XGMI PCS DataParityErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
{"XGMI PCS ReplayFifoOverflowErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
{"XGMI PCS ReplayFifoUnderflowErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
{"XGMI PCS ElasticFifoOverflowErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
{"XGMI PCS DeskewErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
{"XGMI PCS DataStartupLimitErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
{"XGMI PCS FCInitTimeoutErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
{"XGMI PCS RecoveryTimeoutErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
{"XGMI PCS ReadySerialTimeoutErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
{"XGMI PCS ReadySerialAttemptErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
{"XGMI PCS RecoveryAttemptErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
{"XGMI PCS RecoveryRelockAttemptErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};
static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
{"WAFL PCS DataLossErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
{"WAFL PCS TrainingErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
{"WAFL PCS CRCErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
{"WAFL PCS BERExceededErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
{"WAFL PCS TxMetaDataErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
{"WAFL PCS ReplayBufParityErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
{"WAFL PCS DataParityErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
{"WAFL PCS ReplayFifoOverflowErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
{"WAFL PCS ReplayFifoUnderflowErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
{"WAFL PCS ElasticFifoOverflowErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
{"WAFL PCS DeskewErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
{"WAFL PCS DataStartupLimitErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
{"WAFL PCS FCInitTimeoutErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
{"WAFL PCS RecoveryTimeoutErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
{"WAFL PCS ReadySerialTimeoutErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
{"WAFL PCS ReadySerialAttemptErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
{"WAFL PCS RecoveryAttemptErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
{"WAFL PCS RecoveryRelockAttemptErr",
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};
static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
{"XGMI3X16 PCS DataLossErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)},
{"XGMI3X16 PCS TrainingErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)},
{"XGMI3X16 PCS FlowCtrlAckErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)},
{"XGMI3X16 PCS RxFifoUnderflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
{"XGMI3X16 PCS RxFifoOverflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
{"XGMI3X16 PCS CRCErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
{"XGMI3X16 PCS BERExceededErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
{"XGMI3X16 PCS TxVcidDataErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
{"XGMI3X16 PCS ReplayBufParityErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
{"XGMI3X16 PCS DataParityErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
{"XGMI3X16 PCS ReplayFifoOverflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
{"XGMI3X16 PCS ReplayFifoUnderflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
{"XGMI3X16 PCS ElasticFifoOverflowErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
{"XGMI3X16 PCS DeskewErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
{"XGMI3X16 PCS FlowCtrlCRCErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
{"XGMI3X16 PCS DataStartupLimitErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
{"XGMI3X16 PCS FCInitTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
{"XGMI3X16 PCS RecoveryTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
{"XGMI3X16 PCS ReadySerialTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
{"XGMI3X16 PCS ReadySerialAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
{"XGMI3X16 PCS RecoveryAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
{"XGMI3X16 PCS RecoveryRelockAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
{"XGMI3X16 PCS ReplayAttemptErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
{"XGMI3X16 PCS SyncHdrErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
{"XGMI3X16 PCS TxReplayTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
{"XGMI3X16 PCS RxReplayTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
{"XGMI3X16 PCS LinkSubTxTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
{"XGMI3X16 PCS LinkSubRxTimeoutErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
{"XGMI3X16 PCS RxCMDPktErr",
SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};
/**
* DOC: AMDGPU XGMI Support
*
* XGMI is a high speed interconnect that joins multiple GPU cards
* into a homogeneous memory space that is organized by a collective
* hive ID and individual node IDs, both of which are 64-bit numbers.
*
* The file xgmi_device_id contains the unique per GPU device ID and
* is stored in the /sys/class/drm/card${cardno}/device/ directory.
*
* Inside the device directory a sub-directory 'xgmi_hive_info' is
* created which contains the hive ID and the list of nodes.
*
* The hive ID is stored in:
* /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
*
* The node information is stored in numbered directories:
* /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
*
 * Each device has its own xgmi_hive_info directory with a mirrored
 * set of node sub-directories.
*
 * The XGMI memory space is built by contiguously appending the
 * power-of-two padded VRAM space of each node to the previous one.
*
*/
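/*
 * Worked example of the layout described above (a sketch with hypothetical
 * sizes): in a two-node hive where each node exposes 12 GB of VRAM, the
 * per-node size is padded up to the next power of two (16 GB), so node 0
 * occupies offsets [0, 16 GB) and node 1 occupies [16 GB, 32 GB) in the
 * shared XGMI memory space.
 */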
static struct attribute amdgpu_xgmi_hive_id = {
.name = "xgmi_hive_id",
.mode = S_IRUGO
};
static struct attribute *amdgpu_xgmi_hive_attrs[] = {
&amdgpu_xgmi_hive_id,
NULL
};
ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);
static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct amdgpu_hive_info *hive = container_of(
kobj, struct amdgpu_hive_info, kobj);
if (attr == &amdgpu_xgmi_hive_id)
return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
return 0;
}
static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
struct amdgpu_hive_info *hive = container_of(
kobj, struct amdgpu_hive_info, kobj);
amdgpu_reset_put_reset_domain(hive->reset_domain);
hive->reset_domain = NULL;
mutex_destroy(&hive->hive_lock);
kfree(hive);
}
static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
.show = amdgpu_xgmi_show_attrs,
};
static const struct kobj_type amdgpu_xgmi_hive_type = {
.release = amdgpu_xgmi_hive_release,
.sysfs_ops = &amdgpu_xgmi_hive_ops,
.default_groups = amdgpu_xgmi_hive_groups,
};
static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}
static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	ssize_t size = 0;
	int i;
	/* emit one two-digit hex hop count per node */
	for (i = 0; i < top->num_nodes; i++)
		size += sysfs_emit_at(buf, size, "%02x ", top->nodes[i].num_hops);
	size += sysfs_emit_at(buf, size, "\n");
	return size;
}
static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	ssize_t size = 0;
	int i;
	/* emit one two-digit hex link count per node */
	for (i = 0; i < top->num_nodes; i++)
		size += sysfs_emit_at(buf, size, "%02x ", top->nodes[i].num_links);
	size += sysfs_emit_at(buf, size, "\n");
	return size;
}
#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801)
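/*
 * For illustration only: AMDGPU_XGMI_SET_FICAA(0x200) evaluates to 0x456a01
 * and AMDGPU_XGMI_SET_FICAA(0x208) to 0x456a09. These are just the raw
 * indirect-access encodings passed to the DF get_fica/set_fica helpers
 * below, not a documented register layout.
 */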
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
uint64_t fica_out;
unsigned int error_count = 0;
ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
if ((!adev->df.funcs) ||
(!adev->df.funcs->get_fica) ||
(!adev->df.funcs->set_fica))
return -EINVAL;
fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
if (fica_out != 0x1f)
pr_err("xGMI error counters not enabled!\n");
fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
if ((fica_out & 0xffff) == 2)
error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
return sysfs_emit(buf, "%u\n", error_count);
}
static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL);
static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL);
static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
{
int ret = 0;
char node[10] = { 0 };
/* Create xgmi device id file */
ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
if (ret) {
dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
return ret;
}
/* Create xgmi error file */
ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
if (ret)
pr_err("failed to create xgmi_error\n");
/* Create xgmi num hops file */
ret = device_create_file(adev->dev, &dev_attr_xgmi_num_hops);
if (ret)
pr_err("failed to create xgmi_num_hops\n");
/* Create xgmi num links file */
ret = device_create_file(adev->dev, &dev_attr_xgmi_num_links);
if (ret)
pr_err("failed to create xgmi_num_links\n");
/* Create sysfs link to hive info folder on the first device */
if (hive->kobj.parent != (&adev->dev->kobj)) {
ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
"xgmi_hive_info");
if (ret) {
dev_err(adev->dev, "XGMI: Failed to create link to hive info");
goto remove_file;
}
}
sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder back to this device */
ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
if (ret) {
dev_err(adev->dev, "XGMI: Failed to create link from hive info");
goto remove_link;
}
goto success;
remove_link:
sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);
remove_file:
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
device_remove_file(adev->dev, &dev_attr_xgmi_error);
device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
success:
return ret;
}
static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
{
char node[10];
memset(node, 0, sizeof(node));
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
device_remove_file(adev->dev, &dev_attr_xgmi_error);
device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");
sprintf(node, "node%d", atomic_read(&hive->number_devices));
sysfs_remove_link(&hive->kobj, node);
}
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
struct amdgpu_hive_info *hive = NULL;
int ret;
if (!adev->gmc.xgmi.hive_id)
return NULL;
if (adev->hive) {
kobject_get(&adev->hive->kobj);
return adev->hive;
}
mutex_lock(&xgmi_mutex);
list_for_each_entry(hive, &xgmi_hive_list, node) {
if (hive->hive_id == adev->gmc.xgmi.hive_id)
goto pro_end;
}
hive = kzalloc(sizeof(*hive), GFP_KERNEL);
if (!hive) {
dev_err(adev->dev, "XGMI: allocation failed\n");
ret = -ENOMEM;
hive = NULL;
goto pro_end;
}
	/* initialize the new hive if it doesn't exist yet */
ret = kobject_init_and_add(&hive->kobj,
&amdgpu_xgmi_hive_type,
&adev->dev->kobj,
"%s", "xgmi_hive_info");
if (ret) {
dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
kobject_put(&hive->kobj);
hive = NULL;
goto pro_end;
}
/**
	 * Only init hive->reset_domain for non-SRIOV configurations. For SRIOV,
	 * the host driver decides how to reset the GPU, either through FLR or
	 * chain reset. The guest side will get individual notifications from
	 * the host for the FLR if necessary.
*/
if (!amdgpu_sriov_vf(adev)) {
/**
		 * Avoid recreating the reset domain when the hive is reconstructed for
		 * the case of resetting the devices in the XGMI hive during probe for
		 * passthrough GPUs.
		 * See https://www.spinics.net/lists/amd-gfx/msg58836.html
*/
if (adev->reset_domain->type != XGMI_HIVE) {
hive->reset_domain =
amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
if (!hive->reset_domain) {
dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
ret = -ENOMEM;
kobject_put(&hive->kobj);
hive = NULL;
goto pro_end;
}
} else {
amdgpu_reset_get_reset_domain(adev->reset_domain);
hive->reset_domain = adev->reset_domain;
}
}
hive->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&hive->device_list);
INIT_LIST_HEAD(&hive->node);
mutex_init(&hive->hive_lock);
atomic_set(&hive->number_devices, 0);
task_barrier_init(&hive->tb);
hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
hive->hi_req_gpu = NULL;
/*
	 * The hive pstate on boot is high in vega20, so we have to go to the
	 * low pstate after boot.
*/
hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
list_add_tail(&hive->node, &xgmi_hive_list);
pro_end:
if (hive)
kobject_get(&hive->kobj);
mutex_unlock(&xgmi_mutex);
return hive;
}
void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
if (hive)
kobject_put(&hive->kobj);
}
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive;
struct amdgpu_device *request_adev;
bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
bool init_low;
hive = amdgpu_get_xgmi_hive(adev);
if (!hive)
return 0;
request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
amdgpu_put_xgmi_hive(hive);
/* fw bug so temporarily disable pstate switching */
return 0;
if (!hive || adev->asic_type != CHIP_VEGA20)
return 0;
mutex_lock(&hive->hive_lock);
if (is_hi_req)
hive->hi_req_count++;
else
hive->hi_req_count--;
/*
* Vega20 only needs single peer to request pstate high for the hive to
* go high but all peers must request pstate low for the hive to go low
*/
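	/*
	 * Illustration (hypothetical hive of 4 devices): as long as
	 * hi_req_count is non-zero, a low-pstate request from a single peer is
	 * ignored by the check below; only when the last high requester drops
	 * out does the amdgpu_dpm_set_xgmi_pstate() call actually go out.
	 */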
if (hive->pstate == pstate ||
(!is_hi_req && hive->hi_req_count && !init_low))
goto out;
dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);
ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
if (ret) {
dev_err(request_adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
request_adev->gmc.xgmi.node_id,
request_adev->gmc.xgmi.hive_id, ret);
goto out;
}
if (init_low)
hive->pstate = hive->hi_req_count ?
hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
else {
hive->pstate = pstate;
hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
adev : NULL;
}
out:
mutex_unlock(&hive->hive_lock);
return ret;
}
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
int ret;
if (amdgpu_sriov_vf(adev))
return 0;
	/* Each PSP needs to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
atomic_read(&hive->number_devices),
&adev->psp.xgmi_context.top_info);
if (ret)
dev_err(adev->dev,
"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
return ret;
}
/*
* NOTE psp_xgmi_node_info.num_hops layout is as follows:
* num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
* num_hops[5:3] = reserved
* num_hops[2:0] = number of hops
*/
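/*
 * Worked example (hypothetical value): num_hops = 0x41 = 0b01000001 decodes
 * as link type 1 (xGMI3, bits [7:6]) with 1 hop (bits [2:0]);
 * amdgpu_xgmi_get_hops_count() below returns only the hop count after
 * masking with num_hops_mask (0x7), i.e. 1 in this case.
 */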
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
uint8_t num_hops_mask = 0x7;
int i;
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
return top->nodes[i].num_hops & num_hops_mask;
return -EINVAL;
}
int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
int i;
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
return top->nodes[i].num_links;
return -EINVAL;
}
/*
* Devices that support extended data require the entire hive to initialize with
* the shared memory buffer flag set.
*
* Hive locks and conditions apply - see amdgpu_xgmi_add_device
*/
static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
bool set_extended_data)
{
struct amdgpu_device *tmp_adev;
int ret;
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Failed to initialize xgmi session for data partition %i\n",
set_extended_data);
return ret;
}
}
return 0;
}
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
struct amdgpu_hive_info *hive;
struct amdgpu_xgmi *entry;
struct amdgpu_device *tmp_adev = NULL;
int count = 0, ret = 0;
if (!adev->gmc.xgmi.supported)
return 0;
if (!adev->gmc.xgmi.pending_reset &&
amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
ret = psp_xgmi_initialize(&adev->psp, false, true);
if (ret) {
dev_err(adev->dev,
"XGMI: Failed to initialize xgmi session\n");
return ret;
}
ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
if (ret) {
dev_err(adev->dev,
"XGMI: Failed to get hive id\n");
return ret;
}
ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
if (ret) {
dev_err(adev->dev,
"XGMI: Failed to get node id\n");
return ret;
}
} else {
adev->gmc.xgmi.hive_id = 16;
adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
}
hive = amdgpu_get_xgmi_hive(adev);
if (!hive) {
ret = -EINVAL;
dev_err(adev->dev,
"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
goto exit;
}
mutex_lock(&hive->hive_lock);
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
list_for_each_entry(entry, &hive->device_list, head)
top_info->nodes[count++].node_id = entry->node_id;
top_info->num_nodes = count;
atomic_set(&hive->number_devices, count);
task_barrier_add_task(&hive->tb);
if (!adev->gmc.xgmi.pending_reset &&
amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update node list for the other devices in the hive */
if (tmp_adev != adev) {
top_info = &tmp_adev->psp.xgmi_context.top_info;
top_info->nodes[count - 1].node_id =
adev->gmc.xgmi.node_id;
top_info->num_nodes = count;
}
ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
if (ret)
goto exit_unlock;
}
/* get latest topology info for each device from psp */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
&tmp_adev->psp.xgmi_context.top_info, false);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
/* To do : continue with some node failed or disable the whole hive */
goto exit_unlock;
}
}
/* get topology again for hives that support extended data */
if (adev->psp.xgmi_context.supports_extended_data) {
/* initialize the hive to get extended data. */
ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
if (ret)
goto exit_unlock;
/* get the extended data. */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
&tmp_adev->psp.xgmi_context.top_info, true);
if (ret) {
dev_err(tmp_adev->dev,
"XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
goto exit_unlock;
}
}
/* initialize the hive to get non-extended data for the next round. */
ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
if (ret)
goto exit_unlock;
}
}
if (!ret && !adev->gmc.xgmi.pending_reset)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
exit_unlock:
mutex_unlock(&hive->hive_lock);
exit:
if (!ret) {
adev->hive = hive;
dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
} else {
amdgpu_put_xgmi_hive(hive);
dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
ret);
}
return ret;
}
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
struct amdgpu_hive_info *hive = adev->hive;
if (!adev->gmc.xgmi.supported)
return -EINVAL;
if (!hive)
return -EINVAL;
mutex_lock(&hive->hive_lock);
task_barrier_rem_task(&hive->tb);
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
if (hive->hi_req_gpu == adev)
hive->hi_req_gpu = NULL;
list_del(&adev->gmc.xgmi.head);
mutex_unlock(&hive->hive_lock);
amdgpu_put_xgmi_hive(hive);
adev->hive = NULL;
if (atomic_dec_return(&hive->number_devices) == 0) {
/* Remove the hive from global hive list */
mutex_lock(&xgmi_mutex);
list_del(&hive->node);
mutex_unlock(&xgmi_mutex);
amdgpu_put_xgmi_hive(hive);
}
return 0;
}
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
if (!adev->gmc.xgmi.supported ||
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
return amdgpu_ras_block_late_init(adev, ras_block);
}
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}
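/*
 * Worked example with hypothetical numbers: for physical_node_id = 2 and a
 * node_segment_size of 0x400000000 (16 GiB), a node-local address of 0x1000
 * maps to a relative physical address of 0x800001000 in the hive-wide,
 * power-of-two padded memory space described in the DOC section above.
 */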
static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
WREG32_PCIE(pcs_status_reg, 0);
}
static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
uint32_t i;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
pcs_clear_status(adev,
xgmi_pcs_err_status_reg_arct[i]);
break;
case CHIP_VEGA20:
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
pcs_clear_status(adev,
xgmi_pcs_err_status_reg_vg20[i]);
break;
case CHIP_ALDEBARAN:
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
xgmi3x16_pcs_err_status_reg_aldebaran[i]);
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
pcs_clear_status(adev,
walf_pcs_err_status_reg_aldebaran[i]);
break;
default:
break;
}
}
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
uint32_t value,
uint32_t mask_value,
uint32_t *ue_count,
uint32_t *ce_count,
bool is_xgmi_pcs,
bool check_mask)
{
int i;
int ue_cnt = 0;
const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
uint32_t field_array_size = 0;
if (is_xgmi_pcs) {
if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
} else {
pcs_ras_fields = &xgmi_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
}
} else {
pcs_ras_fields = &wafl_pcs_ras_fields[0];
field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
}
if (check_mask)
value = value & ~mask_value;
	/* query xgmi/wafl pcs error status,
	 * only ue is supported */
for (i = 0; value && i < field_array_size; i++) {
ue_cnt = (value &
pcs_ras_fields[i].pcs_err_mask) >>
pcs_ras_fields[i].pcs_err_shift;
if (ue_cnt) {
dev_info(adev->dev, "%s detected\n",
pcs_ras_fields[i].err_name);
*ue_count += ue_cnt;
}
/* reset bit value if the bit is checked */
value &= ~(pcs_ras_fields[i].pcs_err_mask);
}
return 0;
}
static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
int i;
uint32_t data, mask_data = 0;
uint32_t ue_cnt = 0, ce_cnt = 0;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return;
err_data->ue_count = 0;
err_data->ce_count = 0;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
/* check xgmi pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_VEGA20:
/* check xgmi pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_ALDEBARAN:
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
mask_data =
RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, true, true);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
mask_data =
RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
amdgpu_xgmi_query_pcs_error_status(adev, data,
mask_data, &ue_cnt, &ce_cnt, false, true);
}
break;
default:
dev_warn(adev->dev, "XGMI RAS error query not supported");
break;
}
adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
}
/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
void *inject_if, uint32_t instance_mask)
{
int ret = 0;
struct ta_ras_trigger_error_input *block_info =
(struct ta_ras_trigger_error_input *)inject_if;
if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
dev_warn(adev->dev, "Failed to disallow df cstate");
if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
dev_warn(adev->dev, "Failed to disallow XGMI power down");
ret = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);
if (amdgpu_ras_intr_triggered())
return ret;
if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
dev_warn(adev->dev, "Failed to allow XGMI power down");
if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
dev_warn(adev->dev, "Failed to allow df cstate");
return ret;
}
struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
.ras_error_inject = amdgpu_ras_error_inject_xgmi,
};
struct amdgpu_xgmi_ras xgmi_ras = {
.ras_block = {
.hw_ops = &xgmi_ras_hw_ops,
.ras_late_init = amdgpu_xgmi_ras_late_init,
},
};
int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
{
int err;
struct amdgpu_xgmi_ras *ras;
if (!adev->gmc.xgmi.ras)
return 0;
ras = adev->gmc.xgmi.ras;
err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
if (err) {
dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
return err;
}
strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl");
ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "vcn_sw_ring.h"
void vcn_dec_sw_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, uint32_t flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_FENCE);
amdgpu_ring_write(ring, addr);
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_TRAP);
}
void vcn_dec_sw_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
}
void vcn_dec_sw_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct amdgpu_ib *ib, uint32_t flags)
{
uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_IB);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
void vcn_dec_sw_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WAIT);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring, val);
}
void vcn_dec_sw_ring_emit_vm_flush(struct amdgpu_ring *ring,
uint32_t vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for register write */
data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
vcn_dec_sw_ring_emit_reg_wait(ring, data0, data1, mask);
}
void vcn_dec_sw_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val)
{
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_REG_WRITE);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, val);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "bif/bif_5_1_sh_mask.h"
/*
* Interrupts
* Starting with r6xx, interrupts are handled via a ring buffer.
* Ring buffers are areas of GPU accessible memory that the GPU
* writes interrupt vectors into and the host reads vectors out of.
* There is a rptr (read pointer) that determines where the
* host is currently reading, and a wptr (write pointer)
* which determines where the GPU has written. When the
* pointers are equal, the ring is idle. When the GPU
* writes vectors to the ring buffer, it increments the
* wptr. When there is an interrupt, the host then starts
* fetching commands and processing them until the pointers are
* equal again at which point it updates the rptr.
*/
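/*
 * Minimal sketch of the consumer loop implied above (an illustration, not
 * the literal driver flow):
 *
 *   while (rptr != wptr) {
 *           decode the 16-byte vector at rptr;   (iceland_ih_decode_iv)
 *           rptr += 16;
 *   }
 *   write rptr back to the hardware;             (iceland_ih_set_rptr)
 *
 * The 16-byte step matches the rptr advance in iceland_ih_decode_iv()
 * further down in this file.
 */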
static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
* iceland_ih_enable_interrupts - Enable the interrupt ring buffer
*
* @adev: amdgpu_device pointer
*
* Enable the interrupt ring buffer (VI).
*/
static void iceland_ih_enable_interrupts(struct amdgpu_device *adev)
{
u32 ih_cntl = RREG32(mmIH_CNTL);
u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
WREG32(mmIH_CNTL, ih_cntl);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
adev->irq.ih.enabled = true;
}
/**
* iceland_ih_disable_interrupts - Disable the interrupt ring buffer
*
* @adev: amdgpu_device pointer
*
* Disable the interrupt ring buffer (VI).
*/
static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
{
u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
u32 ih_cntl = RREG32(mmIH_CNTL);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
WREG32(mmIH_CNTL, ih_cntl);
/* set rptr, wptr to 0 */
WREG32(mmIH_RB_RPTR, 0);
WREG32(mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
}
/**
* iceland_ih_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
*
* Allocate a ring buffer for the interrupt controller,
* enable the RLC, disable interrupts, enable the IH
* ring buffer and enable it (VI).
 * Called at device load and resume.
* Returns 0 for success, errors for failure.
*/
static int iceland_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
/* disable irqs */
iceland_ih_disable_interrupts(adev);
/* setup interrupt control */
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
/* set the writeback address whether it's enabled or not */
WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
/* set rptr, wptr to 0 */
WREG32(mmIH_RB_RPTR, 0);
WREG32(mmIH_RB_WPTR, 0);
/* Default settings for IH_CNTL (disabled at first) */
ih_cntl = RREG32(mmIH_CNTL);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0);
if (adev->irq.msi_enabled)
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1);
WREG32(mmIH_CNTL, ih_cntl);
pci_set_master(adev->pdev);
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
return 0;
}
/**
* iceland_ih_irq_disable - disable interrupts
*
* @adev: amdgpu_device pointer
*
* Disable interrupts on the hw (VI).
*/
static void iceland_ih_irq_disable(struct amdgpu_device *adev)
{
iceland_ih_disable_interrupts(adev);
/* Wait and acknowledge irq */
mdelay(1);
}
/**
* iceland_ih_get_wptr - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to fetch wptr
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer (VI). Also check for
* ring buffer overflow and deal with it.
* Used by cz_irq_process(VI).
* Returns the value of the wptr.
*/
static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
wptr = le32_to_cpu(*ih->wptr_cpu);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32(mmIH_RB_WPTR);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
	/* When a ring buffer overflow happens, start parsing interrupts
	 * from the last not-overwritten vector (wptr + 16). Hopefully
	 * this should allow us to catch up.
*/
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to decode
* @entry: IV entry to place decoded information into
*
* Decodes the interrupt vector at the current rptr
* position and also advance the position.
*/
static void iceland_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
entry->vmid = (dw[2] >> 8) & 0xff;
entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
ih->rptr += 16;
}
/**
* iceland_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to set rptr
*
* Set the IH ring buffer rptr.
*/
static void iceland_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
WREG32(mmIH_RB_RPTR, ih->rptr);
}
static int iceland_ih_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = amdgpu_irq_add_domain(adev);
if (ret)
return ret;
iceland_ih_set_interrupt_funcs(adev);
return 0;
}
static int iceland_ih_sw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
r = amdgpu_irq_init(adev);
return r;
}
static int iceland_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
}
static int iceland_ih_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return iceland_ih_irq_init(adev);
}
static int iceland_ih_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
iceland_ih_irq_disable(adev);
return 0;
}
static int iceland_ih_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return iceland_ih_hw_fini(adev);
}
static int iceland_ih_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return iceland_ih_hw_init(adev);
}
static bool iceland_ih_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
return false;
return true;
}
static int iceland_ih_wait_for_idle(void *handle)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS */
tmp = RREG32(mmSRBM_STATUS);
if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int iceland_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int iceland_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int iceland_ih_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
.sw_init = iceland_ih_sw_init,
.sw_fini = iceland_ih_sw_fini,
.hw_init = iceland_ih_hw_init,
.hw_fini = iceland_ih_hw_fini,
.suspend = iceland_ih_suspend,
.resume = iceland_ih_resume,
.is_idle = iceland_ih_is_idle,
.wait_for_idle = iceland_ih_wait_for_idle,
.soft_reset = iceland_ih_soft_reset,
.set_clockgating_state = iceland_ih_set_clockgating_state,
.set_powergating_state = iceland_ih_set_powergating_state,
};
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
adev->irq.ih_funcs = &iceland_ih_funcs;
}
const struct amdgpu_ip_block_version iceland_ih_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 2,
.minor = 4,
.rev = 0,
.funcs = &iceland_ih_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/iceland_ih.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
/* Firmware Names */
#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
char ucode_prefix[30];
char fw_name[40];
int r;
amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
if (r)
amdgpu_ucode_release(&adev->vcn.fw);
return r;
}
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
unsigned long bo_size;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
int i, r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
mutex_init(&adev->vcn.vcn_pg_lock);
mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
atomic_set(&adev->vcn.total_submission_cnt, 0);
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
/*
* Some Steam Deck's BIOS versions are incompatible with the
* indirect SRAM mode, leading to amdgpu being unable to get
* properly probed (and even potentially crashing the kernel).
* Hence, check for these versions here - notice this is
* restricted to Vangogh (Deck's APU).
*/
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
!strncmp("F7A0114", bios_ver, 7))) {
adev->vcn.indirect_sram = false;
dev_info(adev->dev,
"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
}
}
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
	/* Bits 20-23 are the encode major version and are non-zero for the new
	 * naming convention. This field is part of version minor and
	 * DRM_DISABLED_FLAG in the old naming convention. Since the latest
	 * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old naming
	 * convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
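	/* Worked example with a hypothetical ucode_version of 0x11201005:
	 * fw_check = (0x11201005 >> 20) & 0xf = 0x2 (non-zero, new convention),
	 * giving VEP 1, DEC 1, ENC major 2, ENC minor 0x01 and revision 0x005,
	 * matching the field extraction in the branch below.
	 */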
fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
if (fw_check) {
unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) {
fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
} else {
fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
}
bo_size += fw_shared_size;
if (amdgpu_vcnfw_log)
bo_size += AMDGPU_VCNFW_LOG_SIZE;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->vcn.inst[i].vcpu_bo,
&adev->vcn.inst[i].gpu_addr,
&adev->vcn.inst[i].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
}
adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
bo_size - fw_shared_size;
adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
bo_size - fw_shared_size;
adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
if (amdgpu_vcnfw_log) {
adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
adev->vcn.inst[i].fw_shared.log_offset = log_offset;
}
if (adev->vcn.indirect_sram) {
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->vcn.inst[i].dpg_sram_bo,
&adev->vcn.inst[i].dpg_sram_gpu_addr,
&adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
}
}
}
return 0;
}
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i, j;
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
amdgpu_bo_free_kernel(
&adev->vcn.inst[j].dpg_sram_bo,
&adev->vcn.inst[j].dpg_sram_gpu_addr,
(void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
kvfree(adev->vcn.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
&adev->vcn.inst[j].gpu_addr,
(void **)&adev->vcn.inst[j].cpu_addr);
amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
}
amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
return 0;
}
/* from vcn4 and above, only unified queue is used */
static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool ret = false;
if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
ret = true;
return ret;
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
bool ret = false;
int vcn_config = adev->vcn.vcn_config[vcn_instance];
if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
ret = true;
else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
ret = true;
else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
ret = true;
return ret;
}
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
unsigned int size;
void *ptr;
int i, idx;
cancel_delayed_work_sync(&adev->vcn.idle_work);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
if (adev->vcn.inst[i].vcpu_bo == NULL)
return 0;
size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
ptr = adev->vcn.inst[i].cpu_addr;
adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vcn.inst[i].saved_bo)
return -ENOMEM;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
drm_dev_exit(idx);
}
}
return 0;
}
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
unsigned int size;
void *ptr;
int i, idx;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
if (adev->vcn.inst[i].vcpu_bo == NULL)
return -EINVAL;
size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
ptr = adev->vcn.inst[i].cpu_addr;
if (adev->vcn.inst[i].saved_bo != NULL) {
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
drm_dev_exit(idx);
}
kvfree(adev->vcn.inst[i].saved_bo);
adev->vcn.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned int offset;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
memset_io(ptr, 0, size);
}
}
return 0;
}
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vcn.idle_work.work);
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i, j;
int r = 0;
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
if (fence[j] ||
unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
adev->vcn.pause_dpg_mode(adev, j, &new_state);
}
fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
fences += fence[j];
}
if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
false);
if (r)
dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
}
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int r = 0;
atomic_inc(&adev->vcn.total_submission_cnt);
if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
true);
if (r)
dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
}
mutex_lock(&adev->vcn.vcn_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
} else {
unsigned int fences = 0;
unsigned int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
}
adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
mutex_unlock(&adev->vcn.vcn_pg_lock);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
atomic_dec(&ring->adev->vcn.total_submission_cnt);
schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
unsigned int i;
int r;
/* VCN in SRIOV does not support direct register read/write */
if (amdgpu_sriov_vf(adev))
return 0;
WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr;
unsigned int i;
int r;
if (amdgpu_sriov_vf(adev))
return 0;
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
rptr = amdgpu_ring_get_rptr(ring);
amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
64, AMDGPU_IB_POOL_DIRECT,
&job);
if (r)
goto err;
ib = &job->ibs[0];
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
ib->ptr[3] = addr >> 32;
ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
ib->ptr[5] = 0;
for (i = 6; i < 16; i += 2) {
ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
ib->ptr[i+1] = 0;
}
ib->length_dw = 16;
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
amdgpu_ib_free(adev, ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err_free:
amdgpu_job_free(job);
err:
amdgpu_ib_free(adev, ib_msg, f);
return r;
}
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_ib *ib)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
memset(ib, 0, sizeof(*ib));
r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
AMDGPU_IB_POOL_DIRECT,
ib);
if (r)
return r;
msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000038);
msg[2] = cpu_to_le32(0x00000001);
msg[3] = cpu_to_le32(0x00000000);
msg[4] = cpu_to_le32(handle);
msg[5] = cpu_to_le32(0x00000000);
msg[6] = cpu_to_le32(0x00000001);
msg[7] = cpu_to_le32(0x00000028);
msg[8] = cpu_to_le32(0x00000010);
msg[9] = cpu_to_le32(0x00000000);
msg[10] = cpu_to_le32(0x00000007);
msg[11] = cpu_to_le32(0x00000000);
msg[12] = cpu_to_le32(0x00000780);
msg[13] = cpu_to_le32(0x00000440);
for (i = 14; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
return 0;
}
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_ib *ib)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
memset(ib, 0, sizeof(*ib));
r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
AMDGPU_IB_POOL_DIRECT,
ib);
if (r)
return r;
msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000018);
msg[2] = cpu_to_le32(0x00000000);
msg[3] = cpu_to_le32(0x00000002);
msg[4] = cpu_to_le32(handle);
msg[5] = cpu_to_le32(0x00000000);
for (i = 6; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
return 0;
}
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
struct amdgpu_ib ib;
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
if (r)
goto error;
r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
if (r)
goto error;
r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
if (r)
goto error;
r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
if (r)
goto error;
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0)
r = -ETIMEDOUT;
else if (r > 0)
r = 0;
dma_fence_put(fence);
error:
return r;
}
static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
uint32_t ib_pack_in_dw, bool enc)
{
uint32_t *ib_checksum;
ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
ib->ptr[ib->length_dw++] = 0x30000002;
ib_checksum = &ib->ptr[ib->length_dw++];
ib->ptr[ib->length_dw++] = ib_pack_in_dw;
ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
ib->ptr[ib->length_dw++] = 0x30000001;
ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
return ib_checksum;
}
static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
uint32_t ib_pack_in_dw)
{
uint32_t i;
uint32_t checksum = 0;
for (i = 0; i < ib_pack_in_dw; i++)
checksum += *(*ib_checksum + 2 + i);
**ib_checksum = checksum;
}
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
unsigned int ib_size_dw = 64;
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
bool sq = amdgpu_vcn_using_unified_queue(ring);
uint32_t *ib_checksum;
uint32_t ib_pack_in_dw;
int i, r;
if (sq)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
&job);
if (r)
goto err;
ib = &job->ibs[0];
ib->length_dw = 0;
/* single queue headers */
if (sq) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
}
ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
if (sq)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
amdgpu_ib_free(adev, ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err_free:
amdgpu_job_free(job);
err:
amdgpu_ib_free(adev, ib_msg, f);
return r;
}
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
struct amdgpu_ib ib;
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
if (r)
goto error;
r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
if (r)
goto error;
r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
if (r)
goto error;
r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
if (r)
goto error;
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0)
r = -ETIMEDOUT;
else if (r > 0)
r = 0;
dma_fence_put(fence);
error:
return r;
}
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr;
unsigned int i;
int r;
if (amdgpu_sriov_vf(adev))
return 0;
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
rptr = amdgpu_ring_get_rptr(ring);
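/* submit a single END command and wait for the read pointer to advance past it */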
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
unsigned int ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint32_t *ib_checksum = NULL;
uint64_t addr;
bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
if (sq)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
&job);
if (r)
return r;
ib = &job->ibs[0];
addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
if (sq)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
ib->ptr[ib->length_dw++] = 0x0000001c;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008;
ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
if (sq)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
amdgpu_job_free(job);
return r;
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
unsigned int ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint32_t *ib_checksum = NULL;
uint64_t addr;
bool sq = amdgpu_vcn_using_unified_queue(ring);
int i, r;
if (sq)
ib_size_dw += 8;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
&job);
if (r)
return r;
ib = &job->ibs[0];
addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
if (sq)
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x0000000b;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002;
ib->ptr[ib->length_dw++] = 0x0000001c;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008;
ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
if (sq)
amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
amdgpu_job_free(job);
return r;
}
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_ib ib;
long r;
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
AMDGPU_IB_POOL_DIRECT,
&ib);
if (r)
return r;
r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
if (r)
goto error;
r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
if (r)
goto error;
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0)
r = -ETIMEDOUT;
else if (r > 0)
r = 0;
error:
amdgpu_ib_free(adev, &ib, fence);
dma_fence_put(fence);
return r;
}
int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
long r;
if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(4, 0, 3)) {
r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
if (r)
goto error;
}
r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
error:
return r;
}
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
switch (ring) {
case 0:
return AMDGPU_RING_PRIO_0;
case 1:
return AMDGPU_RING_PRIO_1;
case 2:
return AMDGPU_RING_PRIO_2;
default:
return AMDGPU_RING_PRIO_0;
}
}
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
{
int i;
unsigned int idx;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
/* currently only 2 FW instances are supported */
if (i >= 2) {
dev_info(adev->dev, "More than 2 VCN FW instances!\n");
break;
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
adev->firmware.ucode[idx].fw = adev->vcn.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(4, 0, 3))
break;
}
dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
}
}
/*
* debugfs for mapping vcn firmware log buffer.
*/
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
volatile struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
vcn = file_inode(f)->i_private;
if (!vcn)
return -ENODEV;
if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
return -EFAULT;
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
return -EFAULT;
if (!size || (read_pos == write_pos))
return 0;
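/* the fw log is a ring buffer: if the write pointer has wrapped,
 * read in two chunks - the tail of the buffer first, then from just
 * past the header up to the write pointer
 */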
if (write_pos > read_pos) {
available = write_pos - read_pos;
read_num[0] = min(size, (size_t)available);
} else {
read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
available = read_num[0] + write_pos - plog->header_size;
if (size > available)
read_num[1] = write_pos - plog->header_size;
else if (size > read_num[0])
read_num[1] = size - read_num[0];
else
read_num[0] = size;
}
for (i = 0; i < 2; i++) {
if (read_num[i]) {
if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
read_pos = plog->header_size;
if (read_num[i] == copy_to_user((buf + read_bytes),
(log_buf + read_pos), read_num[i]))
return -EFAULT;
read_bytes += read_num[i];
read_pos += read_num[i];
}
}
plog->rptr = read_pos;
*pos += read_bytes;
return read_bytes;
}
static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_vcn_fwlog_read,
.llseek = default_llseek
};
#endif
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
char name[32];
sprintf(name, "amdgpu_vcn_%d_fwlog", i);
debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
&amdgpu_debugfs_vcnfwlog_fops,
AMDGPU_VCNFW_LOG_SIZE);
#endif
}
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
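/* advertise the log buffer to the firmware: set the logging flag in shared memory and program the buffer address and size */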
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
log_buf->rptr = log_buf->header_size;
log_buf->wptr = log_buf->header_size;
log_buf->wrapped = 0;
#endif
}
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct ras_common_if *ras_if = adev->vcn.ras_if;
struct ras_dispatch_if ih_data = {
.entry = entry,
};
if (!ras_if)
return 0;
if (!amdgpu_sriov_vf(adev)) {
ih_data.head = *ras_if;
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
} else {
if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
adev->virt.ops->ras_poison_handler(adev);
else
dev_warn(adev->dev,
"No ras_poison_handler interface in SRIOV for VCN!\n");
}
return 0;
}
int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r, i;
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
return r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i) ||
!adev->vcn.inst[i].ras_poison_irq.funcs)
continue;
r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
if (r)
goto late_fini;
}
}
return 0;
late_fini:
amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
int err;
struct amdgpu_vcn_ras *ras;
if (!adev->vcn.ras)
return 0;
ras = adev->vcn.ras;
err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
if (err) {
dev_err(adev->dev, "Failed to register vcn ras block!\n");
return err;
}
strcpy(ras->ras_block.ras_comm.name, "vcn");
ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
adev->vcn.ras_if = &ras->ras_block.ras_comm;
if (!ras->ras_block.ras_late_init)
ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
return 0;
}
int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
enum AMDGPU_UCODE_ID ucode_id)
{
struct amdgpu_firmware_info ucode = {
.ucode_id = (ucode_id ? ucode_id :
(inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
AMDGPU_UCODE_ID_VCN0_RAM)),
.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
(uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
};
return psp_execute_ip_fw_load(&adev->psp, &ucode);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "oss/osssys_6_0_0_offset.h"
#include "oss/osssys_6_0_0_sh_mask.h"
#include "soc15_common.h"
#include "soc15d.h"
#include "v11_structs.h"
#include "soc21.h"
#include <uapi/linux/kfd_ioctl.h>
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
RESET_WAVES,
SAVE_WAVES
};
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, mec, pipe, queue, vmid);
}
static void unlock_srbm(struct amdgpu_device *adev)
{
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
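/* compute MECs are numbered from 1 when selected through GRBM (me 0 is the graphics engine) */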
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(adev, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
queue_id;
return 1ull << bit;
}
static void release_queue(struct amdgpu_device *adev)
{
unlock_srbm(adev);
}
static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases, uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
WREG32(SOC15_REG_OFFSET(GC, 0, regSH_MEM_CONFIG), sh_mem_config);
WREG32(SOC15_REG_OFFSET(GC, 0, regSH_MEM_BASES), sh_mem_bases);
unlock_srbm(adev);
}
static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int pasid,
unsigned int vmid, uint32_t inst)
{
uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
/* Mapping vmid to pasid also for IH block */
pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
vmid, pasid);
WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid, value);
return 0;
}
static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(adev, mec, pipe, 0, 0);
WREG32_SOC15(GC, 0, regCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
unlock_srbm(adev);
return 0;
}
static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
uint32_t sdma_engine_reg_base = 0;
uint32_t sdma_rlc_reg_offset;
switch (engine_id) {
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
regSDMA0_QUEUE0_RB_CNTL) - regSDMA0_QUEUE0_RB_CNTL;
break;
case 1:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
regSDMA1_QUEUE0_RB_CNTL) - regSDMA0_QUEUE0_RB_CNTL;
break;
default:
BUG();
}
sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (regSDMA0_QUEUE1_RB_CNTL - regSDMA0_QUEUE0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
queue_id, sdma_rlc_reg_offset);
return sdma_rlc_reg_offset;
}
static inline struct v11_compute_mqd *get_mqd(void *mqd)
{
return (struct v11_compute_mqd *)mqd;
}
static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct v11_sdma_mqd *)mqd;
}
static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm, uint32_t inst)
{
struct v11_compute_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
m = get_mqd(mqd);
pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
acquire_queue(adev, pipe_id, queue_id);
/* HIQ is set during driver init period with vmid set to 0 */
if (m->cp_hqd_vmid == 0) {
uint32_t value, mec, pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
value = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_CP_SCHEDULERS));
value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
((mec << 5) | (pipe << 3) | queue_id | 0x80));
WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_CP_SCHEDULERS), value);
}
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR);
for (reg = hqd_base;
reg <= SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI); reg++)
WREG32(reg, mqd_hqd[reg - hqd_base]);
/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), data);
if (wptr) {
/* Don't read wptr with get_user because the user
* context may not be accessible (if this function
* runs in a work queue). Instead trigger a one-shot
* polling read from memory in the CP. This assumes
* that wptr is GPU-accessible in the queue's VMID via
* ATC or SVM. WPTR==RPTR before starting the poll so
* the CP starts fetching new commands from the right
* place.
*
* Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
* tricky. Assume that the queue didn't overflow. The
* number of valid bits in the 32-bit RPTR depends on
* the queue size. The remaining bits are taken from
* the saved 64-bit WPTR. If the WPTR wrapped, add the
* queue size.
*/
uint32_t queue_size =
2 << REG_GET_FIELD(m->cp_hqd_pq_control,
CP_HQD_PQ_CONTROL, QUEUE_SIZE);
uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
guessed_wptr += queue_size;
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_LO),
lower_32_bits(guessed_wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI),
upper_32_bits(guessed_wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
lower_32_bits((uint64_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uint64_t)wptr));
pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_PQ_WPTR_POLL_CNTL1),
(uint32_t)get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_EOP_RPTR),
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE), data);
release_queue(adev);
return 0;
}
static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off, uint32_t inst)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v11_compute_mqd *m;
uint32_t mec, pipe;
int r;
m = get_mqd(mqd);
acquire_queue(adev, pipe_id, queue_id);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
goto out_unlock;
}
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
PACKET3_MAP_QUEUES_QUEUE(queue_id) |
PACKET3_MAP_QUEUES_PIPE(pipe) |
PACKET3_MAP_QUEUES_ME((mec - 1)) |
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
amdgpu_ring_commit(kiq_ring);
out_unlock:
spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
}
static int hqd_dump_v11(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
break; \
(*dump)[i][0] = (addr) << 2; \
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
acquire_queue(adev, pipe_id, queue_id);
for (reg = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
return 0;
}
static int hqd_sdma_load_v11(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct v11_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
data = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_CONTEXT_STATUS);
if (data & SDMA0_QUEUE0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
}
usleep_range(500, 1000);
}
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_QUEUE0_DOORBELL,
ENABLE, 1);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_DOORBELL, data);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR,
m->sdmax_rlcx_rb_rptr);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_MINOR_PTR_UPDATE, 1);
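/* use the user-space write pointer if it is readable; otherwise fall back to the saved read pointer so the ring starts out empty */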
if (read_user_wptr(mm, wptr64, data64)) {
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR,
lower_32_bits(data64));
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_MINOR_PTR_UPDATE, 0);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_BASE, m->sdmax_rlcx_rb_base);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_QUEUE0_RB_CNTL,
RB_ENABLE, 1);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL, data);
return 0;
}
static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = regSDMA0_QUEUE0_RB_CNTL;
reg <= regSDMA0_QUEUE0_RB_WPTR_HI; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = regSDMA0_QUEUE0_RB_RPTR_ADDR_HI;
reg <= regSDMA0_QUEUE0_DOORBELL; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = regSDMA0_QUEUE0_DOORBELL_LOG;
reg <= regSDMA0_QUEUE0_DOORBELL_LOG; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = regSDMA0_QUEUE0_DOORBELL_OFFSET;
reg <= regSDMA0_QUEUE0_RB_PREEMPT; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = regSDMA0_QUEUE0_MIDCMD_DATA0;
reg <= regSDMA0_QUEUE0_MIDCMD_CNTL; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
return 0;
}
static bool hqd_is_occupied_v11(struct amdgpu_device *adev, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
uint32_t low, high;
acquire_queue(adev, pipe_id, queue_id);
act = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE));
if (act) {
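/* CP_HQD_PQ_BASE holds the queue address in 256-byte units, hence the >> 8 */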
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
if (low == RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_BASE)) &&
high == RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_BASE_HI)))
retval = true;
}
release_queue(adev);
return retval;
}
static bool hqd_sdma_is_occupied_v11(struct amdgpu_device *adev, void *mqd)
{
struct v11_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK)
return true;
return false;
}
static int hqd_destroy_v11(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id, uint32_t inst)
{
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
struct v11_compute_mqd *m = get_mqd(mqd);
acquire_queue(adev, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15_PREREG(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
switch (reset_type) {
case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
type = DRAIN_PIPE;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
}
WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_DEQUEUE_REQUEST), type);
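/* utimeout is in milliseconds */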
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE));
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue pipe %d queue %d preemption failed\n",
pipe_id, queue_id);
release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
release_queue(adev);
return 0;
}
static int hqd_sdma_destroy_v11(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
struct v11_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
temp = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL);
temp = temp & ~SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK;
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL, temp);
while (true) {
temp = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_CONTEXT_STATUS);
if (temp & SDMA0_QUEUE0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
}
usleep_range(500, 1000);
}
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_DOORBELL, 0);
WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL,
RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL) |
SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK);
m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_HI);
return 0;
}
static int wave_control_execute_v11(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd, uint32_t inst)
{
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX), gfx_index_val);
WREG32(SOC15_REG_OFFSET(GC, 0, regSQ_CMD), sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SA_BROADCAST_WRITES, 1);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SE_BROADCAST_WRITES, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX), data);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
static void set_vm_context_page_table_base_v11(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
vmid);
return;
}
/* SDMA is on gfxhub as well for gfx11 adapters */
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
/*
* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE.
*
* restore_dbg_registers is ignored here but is a general interface requirement
* for devices that support GFXOFF and where the RLC save/restore list
* does not support hw registers for debugging i.e. the driver has to manually
* initialize the debug mode registers after it has disabled GFX off during the
* debug session.
*/
static uint32_t kgd_gfx_v11_enable_debug_trap(struct amdgpu_device *adev,
bool restore_dbg_registers,
uint32_t vmid)
{
uint32_t data = 0;
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
return data;
}
/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev,
bool keep_trap_enabled,
uint32_t vmid)
{
uint32_t data = 0;
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
return data;
}
static int kgd_gfx_v11_validate_trap_override_request(struct amdgpu_device *adev,
uint32_t trap_override,
uint32_t *trap_mask_supported)
{
*trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_FP_OVERFLOW |
KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
KFD_DBG_TRAP_MASK_FP_INEXACT |
KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION;
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 4))
*trap_mask_supported |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
return -EPERM;
return 0;
}
static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
{
uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_FP_OVERFLOW |
KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
KFD_DBG_TRAP_MASK_FP_INEXACT |
KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
uint32_t ret;
ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
return ret;
}
static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
{
uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
return ret;
}
/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
static uint32_t kgd_gfx_v11_set_wave_launch_trap_override(struct amdgpu_device *adev,
uint32_t vmid,
uint32_t trap_override,
uint32_t trap_mask_bits,
uint32_t trap_mask_request,
uint32_t *trap_mask_prev,
uint32_t kfd_dbg_trap_cntl_prev)
{
uint32_t data = 0;
*trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
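/* update only the requested trap mask bits, keeping the previous setting for the rest */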
data = (trap_mask_bits & trap_mask_request) | (*trap_mask_prev & ~trap_mask_request);
data = trap_mask_map_sw_to_hw(data);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
return data;
}
static uint32_t kgd_gfx_v11_set_wave_launch_mode(struct amdgpu_device *adev,
uint8_t wave_launch_mode,
uint32_t vmid)
{
uint32_t data = 0;
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE, wave_launch_mode);
return data;
}
#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev,
uint64_t watch_address,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
uint32_t debug_vmid,
uint32_t inst)
{
uint32_t watch_address_high;
uint32_t watch_address_low;
uint32_t watch_address_cntl;
watch_address_cntl = 0;
watch_address_low = lower_32_bits(watch_address);
watch_address_high = upper_32_bits(watch_address) & 0xffff;
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
MODE,
watch_mode);
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
MASK,
watch_address_mask >> 7);
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
1);
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_high);
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_low);
return watch_address_cntl;
}
static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev,
uint32_t watch_id)
{
return 0;
}
const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
.program_sh_mem_settings = program_sh_mem_settings_v11,
.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
.init_interrupts = init_interrupts_v11,
.hqd_load = hqd_load_v11,
.hiq_mqd_load = hiq_mqd_load_v11,
.hqd_sdma_load = hqd_sdma_load_v11,
.hqd_dump = hqd_dump_v11,
.hqd_sdma_dump = hqd_sdma_dump_v11,
.hqd_is_occupied = hqd_is_occupied_v11,
.hqd_sdma_is_occupied = hqd_sdma_is_occupied_v11,
.hqd_destroy = hqd_destroy_v11,
.hqd_sdma_destroy = hqd_sdma_destroy_v11,
.wave_control_execute = wave_control_execute_v11,
.get_atc_vmid_pasid_mapping_info = NULL,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v11,
.enable_debug_trap = kgd_gfx_v11_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v11_disable_debug_trap,
.validate_trap_override_request = kgd_gfx_v11_validate_trap_override_request,
.set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v11_set_address_watch,
.clear_address_watch = kgd_gfx_v11_clear_address_watch
};
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c |
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_vf_error.h"
#include "mxgpu_ai.h"
void amdgpu_vf_error_put(struct amdgpu_device *adev,
uint16_t sub_error_code,
uint16_t error_flags,
uint64_t error_data)
{
int index;
uint16_t error_code;
if (!amdgpu_sriov_vf(adev))
return;
error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
mutex_lock(&adev->virt.vf_errors.lock);
index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
adev->virt.vf_errors.code[index] = error_code;
adev->virt.vf_errors.flags[index] = error_flags;
adev->virt.vf_errors.data[index] = error_data;
adev->virt.vf_errors.write_count++;
mutex_unlock(&adev->virt.vf_errors.lock);
}
void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
{
/* u32 pf2vf_flags = 0; */
u32 data1, data2, data3;
int index;
if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
(!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
return;
}
/*
TODO: Enable these code when pv2vf_info is merged
AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags);
if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) {
return;
}
*/
mutex_lock(&adev->virt.vf_errors.lock);
/* The error log is a ring buffer; if it has overflowed, advance read_count so that at most AMDGPU_VF_ERROR_ENTRY_SIZE of the newest entries are read. */
if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
}
while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
index = adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
adev->virt.vf_errors.flags[index]);
data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;
adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
adev->virt.vf_errors.read_count++;
}
mutex_unlock(&adev->virt.vf_errors.lock);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v6_0.h"
#include "hdp/hdp_6_0_0_offset.h"
#include "hdp/hdp_6_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
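/* a write to the remapped HDP_MEM_FLUSH_CNTL register flushes the HDP
 * write cache; emit it through the ring when possible, otherwise fall
 * back to a direct MMIO write
 */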
if (!ring || !ring->funcs->emit_wreg)
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t hdp_clk_cntl, hdp_clk_cntl1;
uint32_t hdp_mem_pwr_cntl;
if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_DS |
AMD_CG_SUPPORT_HDP_SD)))
return;
hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
/* Before doing the clock/power mode switch,
 * force the IPH & RC clocks on */
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
RC_MEM_CLK_SOFT_OVERRIDE, 1);
WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
/* disable clock and power gating before any changing */
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_CTRL_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_LS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_DS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_SD_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_CTRL_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_LS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_DS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_SD_EN, 0);
WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
/* Already disabled above. The actions below are for "enabled" only */
if (enable) {
/* only one clock gating mode (LS/DS/SD) can be enabled */
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_SD_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_SD_EN, 1);
} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_LS_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_LS_EN, 1);
} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_DS_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_DS_EN, 1);
}
/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
* be set for SRAM LS/DS/SD */
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
AMD_CG_SUPPORT_HDP_SD)) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
ATOMIC_MEM_POWER_CTRL_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_CTRL_EN, 1);
WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
}
}
/* disable IPH & RC clock override after clock/power mode changing */
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
RC_MEM_CLK_SOFT_OVERRIDE, 0);
WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}
static void hdp_v6_0_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
uint32_t tmp;
/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_LS;
else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_DS;
else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_SD;
}
const struct amdgpu_hdp_funcs hdp_v6_0_funcs = {
.flush_hdp = hdp_v6_0_flush_hdp,
.update_clock_gating = hdp_v6_0_update_clock_gating,
.get_clock_gating_state = hdp_v6_0_get_clockgating_state,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "sdma/sdma_4_4_0_offset.h"
#include "sdma/sdma_4_4_0_sh_mask.h"
#include "soc15.h"
#include "amdgpu_ras.h"
#define SDMA1_REG_OFFSET 0x600
#define SDMA2_REG_OFFSET 0x1cda0
#define SDMA3_REG_OFFSET 0x1d1a0
#define SDMA4_REG_OFFSET 0x1d5a0
/* helper function that allows using only the SDMA0 register offset
 * to calculate the register offset for all the SDMA instances */
static uint32_t sdma_v4_4_get_reg_offset(struct amdgpu_device *adev,
uint32_t instance,
uint32_t offset)
{
uint32_t sdma_base = adev->reg_offset[SDMA0_HWIP][0][0];
switch (instance) {
case 0:
return (sdma_base + offset);
case 1:
return (sdma_base + SDMA1_REG_OFFSET + offset);
case 2:
return (sdma_base + SDMA2_REG_OFFSET + offset);
case 3:
return (sdma_base + SDMA3_REG_OFFSET + offset);
case 4:
return (sdma_base + SDMA4_REG_OFFSET + offset);
default:
break;
}
return 0;
}
static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
{ "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
0, 0,
},
{ "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UCODE_BUF_SED),
0, 0,
},
{ "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_RB_CMD_BUF_SED),
0, 0,
},
{ "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_IB_CMD_BUF_SED),
0, 0,
},
{ "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RD_FIFO_SED),
0, 0,
},
{ "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RDBST_FIFO_SED),
0, 0,
},
{ "SDMA_UTCL1_WR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_WR_FIFO_SED),
0, 0,
},
{ "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_DATA_LUT_FIFO_SED),
0, 0,
},
{ "SDMA_SPLIT_DATA_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_SPLIT_DATA_BUF_SED),
0, 0,
},
{ "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED),
0, 0,
},
{ "SDMA_MC_RDRET_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED),
0, 0,
},
};
static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
uint32_t reg_offset,
uint32_t value,
uint32_t instance,
uint32_t *sec_count)
{
uint32_t i;
uint32_t sec_cnt;
/* double-bit (multiple bits) error detection is not supported */
for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) {
if (sdma_v4_4_ras_fields[i].reg_offset != reg_offset)
continue;
/* the SDMA_EDC_COUNTER register in each sdma instance
 * shares the same sed shift_mask
 */
sec_cnt = (value &
sdma_v4_4_ras_fields[i].sec_count_mask) >>
sdma_v4_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
dev_info(adev->dev, "Detected %s in SDMA%d, SED %d\n",
sdma_v4_4_ras_fields[i].name,
instance, sec_cnt);
*sec_count += sec_cnt;
}
}
}
static int sdma_v4_4_query_ras_error_count_by_instance(struct amdgpu_device *adev,
uint32_t instance,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0;
uint32_t reg_value = 0;
uint32_t reg_offset = 0;
reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER);
reg_value = RREG32(reg_offset);
/* double bit error is not supported */
if (reg_value)
sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER, reg_value,
instance, &sec_count);
reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER2);
reg_value = RREG32(reg_offset);
/* double bit error is not supported */
if (reg_value)
sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER2, reg_value,
instance, &sec_count);
/*
* err_data->ue_count should be initialized to 0
* before calling into this function
*
* SDMA RAS supports single bit uncorrectable error detection.
* So, increment uncorrectable error count.
*/
err_data->ue_count += sec_count;
/*
* SDMA RAS does not support correctable errors.
* Set ce count to 0.
*/
err_data->ce_count = 0;
return 0;
};
static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i;
uint32_t reg_offset;
/* write 0 to EDC_COUNTER reg to clear sdma edc counters */
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER);
WREG32(reg_offset, 0);
reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER2);
WREG32(reg_offset, 0);
}
}
}
static void sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
{
int i = 0;
for (i = 0; i < adev->sdma.num_instances; i++) {
if (sdma_v4_4_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
return;
}
}
}
const struct amdgpu_ras_block_hw_ops sdma_v4_4_ras_hw_ops = {
.query_ras_error_count = sdma_v4_4_query_ras_error_count,
.reset_ras_error_count = sdma_v4_4_reset_ras_error_count,
};
struct amdgpu_sdma_ras sdma_v4_4_ras = {
.ras_block = {
.hw_ops = &sdma_v4_4_ras_hw_ops,
},
};
| linux-master | drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c |
/*
* Copyright (C) 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev)
{
int err;
struct amdgpu_mmhub_ras *ras;
if (!adev->mmhub.ras)
return 0;
ras = adev->mmhub.ras;
err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
if (err) {
dev_err(adev->dev, "Failed to register mmhub ras block!\n");
return err;
}
strcpy(ras->ras_block.ras_comm.name, "mmhub");
ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->mmhub.ras_if = &ras->ras_block.ras_comm;
/* mmhub ras follows amdgpu_ras_block_late_init_default for late init */
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "umc_v6_7.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"
const uint32_t
umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
{28, 20, 24, 16, 12, 4, 8, 0},
{6, 30, 2, 26, 22, 14, 18, 10},
{19, 11, 15, 7, 3, 27, 31, 23},
{9, 1, 5, 29, 25, 17, 21, 13}
};
const uint32_t
umc_v6_7_channel_idx_tbl_first[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
{19, 11, 15, 7, 3, 27, 31, 23},
{9, 1, 5, 29, 25, 17, 21, 13},
{28, 20, 24, 16, 12, 4, 8, 0},
{6, 30, 2, 26, 22, 14, 18, 10},
};
static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
uint32_t umc_inst,
uint32_t ch_inst)
{
uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	/* adjust umc and channel index offset,
	 * the register address is not linear on each umc instance */
umc_inst = index / 4;
ch_inst = index % 4;
return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
}
static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
uint64_t mc_umc_status, uint32_t umc_reg_offset)
{
uint32_t mc_umc_addr;
uint64_t reg_value;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
dev_info(adev->dev, "Deferred error, no user action is needed.\n");
if (mc_umc_status)
dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
/* print IPID registers value */
mc_umc_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_IPIDT0);
reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
if (reg_value)
dev_info(adev->dev, "MCA IPID 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
/* print SYND registers value */
mc_umc_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_SYNDT0);
reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
if (reg_value)
dev_info(adev->dev, "MCA SYND 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
/* print MISC0 registers value */
mc_umc_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_MISC0T0);
reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
if (reg_value)
dev_info(adev->dev, "MCA MISC0 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
}
static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
uint32_t umc_reg_offset;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
umc_reg_offset = get_umc_v6_7_reg_offset(adev,
umc_inst, ch_inst);
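	/* the ras ecc info table holds one entry per (umc_inst, ch_inst)
	 * pair, laid out umc-major
	 */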
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
	/* check for SRAM correctable error
	 * MCUMC_STATUS is a 64 bit register
	 */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
*error_count += 1;
umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
if (ras->umc_ecc.record_ce_addr_supported) {
uint64_t err_addr, soc_pa;
uint32_t channel_index =
adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_ceumc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
/* translate umc channel address to soc pa, 3 parts are included */
soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
/* The umc channel bits are not original values, they are hashed */
SET_CHANNEL_HASH(channel_index, soc_pa);
dev_info(adev->dev, "Error Address(PA): 0x%llx\n", soc_pa);
}
}
}
static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
uint32_t umc_reg_offset;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
umc_reg_offset = get_umc_v6_7_reg_offset(adev,
umc_inst, ch_inst);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
/* check the MCUMC_STATUS */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
}
}
static int umc_v6_7_ecc_info_querry_ecc_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
struct ras_err_data *err_data = (struct ras_err_data *)data;
umc_v6_7_ecc_info_query_correctable_error_count(adev,
umc_inst, ch_inst,
&(err_data->ce_count));
umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
umc_inst, ch_inst,
&(err_data->ue_count));
return 0;
}
static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v6_7_ecc_info_querry_ecc_error_count, ras_error_status);
}
void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data, uint64_t err_addr,
uint32_t ch_inst, uint32_t umc_inst)
{
uint32_t channel_index;
uint64_t soc_pa, retired_page, column;
channel_index =
adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
/* translate umc channel address to soc pa, 3 parts are included */
soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
/* The umc channel bits are not original values, they are hashed */
SET_CHANNEL_HASH(channel_index, soc_pa);
/* clear [C4 C3 C2] in soc physical address */
soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
/* loop for all possibilities of [C4 C3 C2] */
for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
amdgpu_umc_fill_error_record(err_data, err_addr,
retired_page, channel_index, umc_inst);
/* shift R14 bit */
retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
amdgpu_umc_fill_error_record(err_data, err_addr,
retired_page, channel_index, umc_inst);
}
}
static int umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
struct ras_err_data *err_data = (struct ras_err_data *)data;
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
return 0;
if (!err_data->err_addr)
return 0;
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
umc_v6_7_convert_error_address(adev, err_data, err_addr,
ch_inst, umc_inst);
}
return 0;
}
static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v6_7_ecc_info_query_error_address, ras_error_status);
}
static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count,
uint32_t ch_inst,
uint32_t umc_inst)
{
uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
uint32_t ecc_err_cnt, ecc_err_cnt_addr;
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
	/* UMC 6_7 registers */
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCntSel);
ecc_err_cnt_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCnt);
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
/* select the lower chip and check the error count */
ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_7_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_7_CE_CNT_INIT);
	/* check for SRAM correctable error
	 * MCUMC_STATUS is a 64 bit register
	 */
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
*error_count += 1;
umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
{
uint64_t err_addr, soc_pa;
uint32_t mc_umc_addrt0;
uint32_t channel_index;
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
channel_index =
adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
/* translate umc channel address to soc pa, 3 parts are included */
soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
/* The umc channel bits are not original values, they are hashed */
SET_CHANNEL_HASH(channel_index, soc_pa);
dev_info(adev->dev, "Error Address(PA): 0x%llx\n", soc_pa);
}
}
}
static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
/* check the MCUMC_STATUS */
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
}
}
static int umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
uint32_t ecc_err_cnt_addr;
uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
uint32_t umc_reg_offset =
get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst);
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0,
regUMCCH0_0_EccErrCntSel);
ecc_err_cnt_addr =
SOC15_REG_OFFSET(UMC, 0,
regUMCCH0_0_EccErrCnt);
/* select the lower chip */
ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
umc_reg_offset) * 4);
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
ecc_err_cnt_sel);
/* clear lower chip error count */
WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
UMC_V6_7_CE_CNT_INIT);
/* select the higher chip */
ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
umc_reg_offset) * 4);
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
ecc_err_cnt_sel);
/* clear higher chip error count */
WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
UMC_V6_7_CE_CNT_INIT);
return 0;
}
static void umc_v6_7_reset_error_count(struct amdgpu_device *adev)
{
amdgpu_umc_loop_channels(adev,
umc_v6_7_reset_error_count_per_channel, NULL);
}
static int umc_v6_7_query_ecc_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
struct ras_err_data *err_data = (struct ras_err_data *)data;
uint32_t umc_reg_offset =
get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst);
umc_v6_7_query_correctable_error_count(adev,
umc_reg_offset,
&(err_data->ce_count),
ch_inst, umc_inst);
umc_v6_7_querry_uncorrectable_error_count(adev,
umc_reg_offset,
&(err_data->ue_count));
return 0;
}
static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v6_7_query_ecc_error_count, ras_error_status);
umc_v6_7_reset_error_count(adev);
}
static int umc_v6_7_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
uint32_t mc_umc_status_addr;
uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
struct ras_err_data *err_data = (struct ras_err_data *)data;
uint32_t umc_reg_offset =
get_umc_v6_7_reg_offset(adev, umc_inst, ch_inst);
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
return 0;
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
return 0;
}
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
err_addr =
REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
umc_v6_7_convert_error_address(adev, err_data, err_addr,
ch_inst, umc_inst);
}
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
return 0;
}
static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v6_7_query_error_address, ras_error_status);
}
static uint32_t umc_v6_7_query_ras_poison_mode_per_channel(
struct amdgpu_device *adev,
uint32_t umc_reg_offset)
{
uint32_t ecc_ctrl_addr, ecc_ctrl;
ecc_ctrl_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccCtrl);
ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
umc_reg_offset) * 4);
return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_EccCtrl, UCFatalEn);
}
static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev)
{
uint32_t umc_reg_offset = 0;
/* Enabling fatal error in umc instance0 channel0 will be
* considered as fatal error mode
*/
umc_reg_offset = get_umc_v6_7_reg_offset(adev, 0, 0);
return !umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
}
const struct amdgpu_ras_block_hw_ops umc_v6_7_ras_hw_ops = {
.query_ras_error_count = umc_v6_7_query_ras_error_count,
.query_ras_error_address = umc_v6_7_query_ras_error_address,
};
struct amdgpu_umc_ras umc_v6_7_ras = {
.ras_block = {
.hw_ops = &umc_v6_7_ras_hw_ops,
},
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/umc_v6_7.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "hdp/hdp_6_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
#include "soc15_common.h"
#include "soc15.h"
#include "sdma_v6_0_0_pkt_open.h"
#include "nbio_v4_3.h"
#include "sdma_common.h"
#include "sdma_v6_0.h"
#include "v11_structs.h"
MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_2.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_0_3.bin");
MODULE_FIRMWARE("amdgpu/sdma_6_1_0.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x589a
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int sdma_v6_0_start(struct amdgpu_device *adev);
static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
u32 base;
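	/* HYP_DEC registers live in a separate aperture with a fixed 0x20
	 * per-instance stride; all other registers are offset by
	 * SDMA1_REG_OFFSET for instance 1
	 */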
if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
internal_offset <= SDMA0_HYP_DEC_REG_END) {
base = adev->reg_offset[GC_HWIP][0][1];
if (instance != 0)
internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
} else {
base = adev->reg_offset[GC_HWIP][0][0];
if (instance == 1)
internal_offset += SDMA1_REG_OFFSET;
}
return base + internal_offset;
}
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE));
amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, 1);
ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
return ret;
}
static void sdma_v6_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
unsigned offset)
{
unsigned cur;
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
cur = (ring->wptr - 1) & ring->buf_mask;
if (cur > offset)
ring->ring[offset] = cur - offset;
else
ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
/**
* sdma_v6_0_ring_get_rptr - get the current read pointer
*
* @ring: amdgpu ring pointer
*
* Get the current rptr from the hardware.
*/
static uint64_t sdma_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
u64 *rptr;
/* XXX check if swapping is necessary on BE */
rptr = (u64 *)ring->rptr_cpu_addr;
DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
return ((*rptr) >> 2);
}
/**
* sdma_v6_0_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware.
*/
static uint64_t sdma_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
u64 wptr = 0;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
}
return wptr >> 2;
}
/**
* sdma_v6_0_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware.
*/
static void sdma_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *wptr_saved;
uint32_t *is_queue_unmap;
uint64_t aggregated_db_index;
uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
if (ring->is_mes_queue) {
wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
sizeof(uint32_t));
aggregated_db_index =
amdgpu_mes_get_aggregated_doorbell_index(adev,
ring->hw_prio);
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
*wptr_saved = ring->wptr << 2;
if (*is_queue_unmap) {
WDOORBELL64(aggregated_db_index, ring->wptr << 2);
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
if (*is_queue_unmap)
WDOORBELL64(aggregated_db_index,
ring->wptr << 2);
}
} else {
if (ring->use_doorbell) {
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
/* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
DRM_DEBUG("Not using doorbell -- "
"regSDMA%i_GFX_RB_WPTR == 0x%08x "
"regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
ring->me,
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
ring->me, regSDMA0_QUEUE0_RB_WPTR),
lower_32_bits(ring->wptr << 2));
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev,
ring->me, regSDMA0_QUEUE0_RB_WPTR_HI),
upper_32_bits(ring->wptr << 2));
}
}
}
static void sdma_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
* sdma_v6_0_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
* @ib: IB object to schedule
* @flags: unused
* @job: job to retrieve vmid from
*
* Schedule an IB in the DMA ring.
*/
static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
	/* An IB packet must end on an 8-dword boundary, i.e. the next
	 * dword after it must be 8-dword aligned. Our IB packet below is
	 * 6 dwords long, so prepend x NOPs such that, in modular
	 * arithmetic,
	 * wptr + 6 + x = 8k, k >= 0, which in C is
	 * (wptr + 6 + x) % 8 == 0.
	 * The expression below is a solution for x.
	 */
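	/* e.g. if wptr % 8 == 4, then x = (2 - 4) & 7 = 6 NOPs land the
	 * 6-dword packet on the next 8-dword boundary
	 */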
sdma_v6_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
/**
* sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
*
* flush the IB by graphics cache rinse.
*/
static void sdma_v6_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
SDMA_GCR_GLI_INV(1);
/* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_GCR_REQ));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}
/**
* sdma_v6_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
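	/* each SDMA instance owns one bit in the HDP flush request/done
	 * registers, hence the shift by the instance number
	 */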
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
/**
* sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
* @addr: address
* @seq: fence seq number
* @flags: fence flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed.
*/
static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
/* zero in first two bits */
BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
/* optionally write high bits as well */
if (write64bit) {
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
/* zero in first two bits */
BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
uint32_t ctx = ring->is_mes_queue ?
(ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
}
}
/**
* sdma_v6_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the gfx async dma ring buffers.
*/
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
}
}
/**
* sdma_v6_0_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the compute async dma queues.
*/
static void sdma_v6_0_rlc_stop(struct amdgpu_device *adev)
{
/* XXX todo */
}
/**
* sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts
*
* @adev: amdgpu_device pointer
* @enable: enable/disable context switching due to queue empty conditions
*
* Enable or disable the async dma engines queue empty context switch.
*/
static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
CTXEMPTY_INT_ENABLE, enable ? 1 : 0);
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl);
}
}
}
/**
* sdma_v6_0_enable - stop the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
*
* Halt or unhalt the async dma engines.
*/
static void sdma_v6_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
if (!enable) {
sdma_v6_0_gfx_stop(adev);
sdma_v6_0_rlc_stop(adev);
}
if (amdgpu_sriov_vf(adev))
return;
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), f32_cntl);
}
}
/**
* sdma_v6_0_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the gfx DMA ring buffers and enable them.
* Returns 0 for success, error for failure.
*/
static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 doorbell;
u32 doorbell_offset;
u32 temp;
u64 wptr_gpu_addr;
int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
if (!amdgpu_sriov_vf(adev))
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
/* setup the wptr shadow polling */
wptr_gpu_addr = ring->wptr_gpu_addr;
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
lower_32_bits(wptr_gpu_addr));
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
/* set the wb address whether it's enabled or not */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
ring->wptr = 0;
		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
}
doorbell = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
doorbell_offset = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
}
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
if (i == 0)
adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
if (amdgpu_sriov_vf(adev))
sdma_v6_0_ring_set_wptr(ring);
		/* set minor_ptr_update to 0 after wptr is programmed */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
/* Set up RESP_MODE to non-copy addresses */
temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), temp);
/* program default cache read and write policy */
temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
/* clean read policy and write policy bits */
temp &= 0xFF0FFF;
temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
(CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), temp);
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, TH1_RESET, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), temp);
}
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
if (amdgpu_sriov_vf(adev))
sdma_v6_0_enable(adev, true);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
}
/**
* sdma_v6_0_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the compute DMA queues and enable them.
* Returns 0 for success, error for failure.
*/
static int sdma_v6_0_rlc_resume(struct amdgpu_device *adev)
{
return 0;
}
/**
* sdma_v6_0_load_microcode - load the sDMA ME ucode
*
* @adev: amdgpu_device pointer
*
* Loads the sDMA0/1 ucode.
* Returns 0 for success, -EINVAL if the ucode is not available.
*/
static int sdma_v6_0_load_microcode(struct amdgpu_device *adev)
{
const struct sdma_firmware_header_v2_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
int i, j;
bool use_broadcast;
/* halt the MEs */
sdma_v6_0_enable(adev, false);
if (!adev->sdma.instance[0].fw)
return -EINVAL;
/* use broadcast mode to load SDMA microcode by default */
use_broadcast = true;
if (use_broadcast) {
dev_info(adev->dev, "Use broadcast method to load SDMA firmware\n");
/* load Control Thread microcode */
hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;
fw_data = (const __le32 *)
(adev->sdma.instance[0].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0);
for (j = 0; j < fw_size; j++) {
if (amdgpu_emu_mode == 1 && j % 500 == 0)
msleep(1);
WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
}
/* load Context Switch microcode */
fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;
fw_data = (const __le32 *)
(adev->sdma.instance[0].fw->data +
le32_to_cpu(hdr->ctl_ucode_offset));
WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_ADDR), 0x8000);
for (j = 0; j < fw_size; j++) {
if (amdgpu_emu_mode == 1 && j % 500 == 0)
msleep(1);
WREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_BROADCAST_UCODE_DATA), le32_to_cpup(fw_data++));
}
} else {
dev_info(adev->dev, "Use legacy method to load SDMA firmware\n");
for (i = 0; i < adev->sdma.num_instances; i++) {
/* load Control Thread microcode */
hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->ctx_jt_offset + hdr->ctx_jt_size) / 4;
fw_data = (const __le32 *)
(adev->sdma.instance[0].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0);
for (j = 0; j < fw_size; j++) {
if (amdgpu_emu_mode == 1 && j % 500 == 0)
msleep(1);
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
}
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);
/* load Context Switch microcode */
fw_size = le32_to_cpu(hdr->ctl_jt_offset + hdr->ctl_jt_size) / 4;
fw_data = (const __le32 *)
(adev->sdma.instance[0].fw->data +
le32_to_cpu(hdr->ctl_ucode_offset));
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), 0x8000);
for (j = 0; j < fw_size; j++) {
if (amdgpu_emu_mode == 1 && j % 500 == 0)
msleep(1);
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
}
WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UCODE_ADDR), adev->sdma.instance[0].fw_version);
}
}
return 0;
}
static int sdma_v6_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp;
int i;
sdma_v6_0_gfx_stop(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE));
tmp |= SDMA0_FREEZE__FREEZE_MASK;
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_FREEZE), tmp);
tmp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL));
tmp |= SDMA0_F32_CNTL__HALT_MASK;
tmp |= SDMA0_F32_CNTL__TH1_RESET_MASK;
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_F32_CNTL), tmp);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_PREEMPT), 0);
udelay(100);
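		/* pulse the per-instance SDMA soft reset bit in GRBM, with
		 * readbacks and delays to let the reset settle
		 */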
tmp = GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK << i;
WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
udelay(100);
WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, 0);
tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
udelay(100);
}
return sdma_v6_0_start(adev);
}
static bool sdma_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int i, r;
long tmo = msecs_to_jiffies(1000);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ib(ring, tmo);
if (r)
return true;
}
return false;
}
/**
* sdma_v6_0_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the DMA engines and enable them.
* Returns 0 for success, error for failure.
*/
static int sdma_v6_0_start(struct amdgpu_device *adev)
{
int r = 0;
if (amdgpu_sriov_vf(adev)) {
sdma_v6_0_enable(adev, false);
/* set RB registers */
r = sdma_v6_0_gfx_resume(adev);
return r;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = sdma_v6_0_load_microcode(adev);
if (r)
return r;
/* The value of regSDMA_F32_CNTL is invalid the moment after loading fw */
if (amdgpu_emu_mode == 1)
msleep(1000);
}
/* unhalt the MEs */
sdma_v6_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v6_0_ctxempty_int_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = sdma_v6_0_gfx_resume(adev);
if (r)
return r;
r = sdma_v6_0_rlc_resume(adev);
return r;
}
static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
struct amdgpu_mqd_prop *prop)
{
struct v11_sdma_mqd *m = mqd;
uint64_t wb_gpu_addr;
m->sdmax_rlcx_rb_cntl =
order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
wb_gpu_addr = prop->wptr_gpu_addr;
m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
wb_gpu_addr = prop->rptr_gpu_addr;
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
m->sdmax_rlcx_ib_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, 0,
regSDMA0_QUEUE0_IB_CNTL));
m->sdmax_rlcx_doorbell_offset =
prop->doorbell_index << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
m->sdmax_rlcx_skip_cntl = 0;
m->sdmax_rlcx_context_status = 0;
m->sdmax_rlcx_doorbell_log = 0;
m->sdmax_rlcx_rb_aql_cntl = regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
m->sdmax_rlcx_dummy_reg = regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
return 0;
}
static void sdma_v6_0_set_mqd_funcs(struct amdgpu_device *adev)
{
adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v11_sdma_mqd);
adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v6_0_mqd_init;
}
/**
* sdma_v6_0_ring_test_ring - simple async dma engine test
*
* @ring: amdgpu_ring structure holding ring information
*
 * Test the DMA engine by using it to write a
 * value to memory.
* Returns 0 for success, error for failure.
*/
static int sdma_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
unsigned i;
unsigned index;
int r;
u32 tmp;
u64 gpu_addr;
volatile uint32_t *cpu_ptr = NULL;
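	/* seed the target with a poison value; the test passes once the
	 * engine overwrites it with 0xDEADBEEF
	 */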
tmp = 0xCAFEDEAD;
if (ring->is_mes_queue) {
uint32_t offset = 0;
offset = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_PADDING_OFFS);
gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
*cpu_ptr = tmp;
} else {
r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
}
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(tmp);
}
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
amdgpu_device_wb_free(adev, index);
return r;
}
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->is_mes_queue)
tmp = le32_to_cpu(*cpu_ptr);
else
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
msleep(1);
else
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
if (!ring->is_mes_queue)
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v6_0_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring.
* Returns 0 on success, error on failure.
*/
static int sdma_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
long r;
u32 tmp = 0;
u64 gpu_addr;
volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
if (ring->is_mes_queue) {
uint32_t offset = 0;
offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
offset = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_PADDING_OFFS);
gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
*cpu_ptr = tmp;
} else {
r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
}
ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
ib.ptr[4] = 0xDEADBEEF;
ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
if (ring->is_mes_queue)
tmp = le32_to_cpu(*cpu_ptr);
else
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v6_0_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr to copy from
* @count: number of page entries to update
*
* Update PTEs by copying them from the GART using sDMA.
*/
static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = bytes - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
* sdma_v6_0_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
*
* Update PTEs by writing them manually using sDMA.
*/
static void sdma_v6_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr)
{
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw - 1;
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
}
}
/**
* sdma_v6_0_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using sDMA.
*/
static void sdma_v6_0_vm_set_pte_pde(struct amdgpu_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_PTEPDE);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
/**
* sdma_v6_0_ring_pad_ib - pad the IB
* @ib: indirect buffer to fill with padding
* @ring: amdgpu ring pointer
*
 * Pad the IB with NOPs so its length is a multiple of 8 dwords.
*/
static void sdma_v6_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
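	/* (-length_dw) & 7 is the number of NOPs needed to reach the next
	 * multiple of 8 dwords
	 */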
pad_count = (-ib->length_dw) & 0x7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP) |
SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
else
ib->ptr[ib->length_dw++] =
SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP);
}
/**
* sdma_v6_0_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (CIK).
*/
static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
/**
* sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
* @vmid: vmid number to use
* @pd_addr: address
*
* Update the page table base and flush the VM TLB
* using sDMA.
*/
static void sdma_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
/* Update the PD address for this VMID. */
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
(hub->ctx_addr_distance * vmid),
lower_32_bits(pd_addr));
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
(hub->ctx_addr_distance * vmid),
upper_32_bits(pd_addr));
/* Trigger invalidation. */
amdgpu_ring_write(ring,
SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATION) |
SDMA_PKT_VM_INVALIDATION_HEADER_GFX_ENG_ID(ring->vm_inv_eng) |
SDMA_PKT_VM_INVALIDATION_HEADER_MM_ENG_ID(0x1f));
amdgpu_ring_write(ring, req);
amdgpu_ring_write(ring, 0xFFFFFFFF);
amdgpu_ring_write(ring,
SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(1 << vmid) |
SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(0x1F));
}
static void sdma_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, val);
}
static void sdma_v6_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val); /* reference */
amdgpu_ring_write(ring, mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
amdgpu_ring_emit_wreg(ring, reg0, ref);
/* wait for a cycle to reset vm_inv_eng*_ack */
amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
static struct amdgpu_sdma_ras sdma_v6_0_3_ras = {
.ras_block = {
.ras_late_init = amdgpu_ras_block_late_init,
},
};
static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(6, 0, 3):
adev->sdma.ras = &sdma_v6_0_3_ras;
break;
default:
break;
}
}
static int sdma_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v6_0_set_ring_funcs(adev);
sdma_v6_0_set_buffer_funcs(adev);
sdma_v6_0_set_vm_pte_funcs(adev);
sdma_v6_0_set_irq_funcs(adev);
sdma_v6_0_set_mqd_funcs(adev);
sdma_v6_0_set_ras_funcs(adev);
return 0;
}
static int sdma_v6_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
GFX_11_0_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->me = i;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
ring->doorbell_index =
(adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset
ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
if (amdgpu_sdma_ras_sw_init(adev)) {
dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
return -EINVAL;
}
return r;
}
static int sdma_v6_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
static int sdma_v6_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v6_0_start(adev);
}
static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev)) {
/* disable the scheduler for SDMA */
amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
}
sdma_v6_0_ctxempty_int_enable(adev, false);
sdma_v6_0_enable(adev, false);
return 0;
}
static int sdma_v6_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v6_0_hw_fini(adev);
}
static int sdma_v6_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v6_0_hw_init(adev);
}
static bool sdma_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
u32 tmp = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG));
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
}
return true;
}
static int sdma_v6_0_wait_for_idle(void *handle)
{
unsigned i;
u32 sdma0, sdma1;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v6_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
sdma1 = RREG32(sdma_v6_0_get_reg_offset(adev, 1, regSDMA0_STATUS_REG));
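		/* both SDMA instances must report idle at the same time */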
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
u32 index = 0;
u64 sdma_gfx_preempt;
amdgpu_sdma_get_index_from_ring(ring, &index);
sdma_gfx_preempt =
sdma_v6_0_get_reg_offset(adev, index, regSDMA0_QUEUE0_PREEMPT);
/* assert preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, false);
/* emit the trailing fence */
ring->trail_seq += 1;
amdgpu_ring_alloc(ring, 10);
sdma_v6_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
ring->trail_seq, 0);
amdgpu_ring_commit(ring);
/* assert IB preemption */
WREG32(sdma_gfx_preempt, 1);
/* poll the trailing fence */
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->trail_seq ==
le32_to_cpu(*(ring->trail_fence_cpu_addr)))
break;
udelay(1);
}
if (i >= adev->usec_timeout) {
r = -EINVAL;
DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
}
/* deassert IB preemption */
WREG32(sdma_gfx_preempt, 0);
/* deassert the preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, true);
return r;
}
static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL);
if (!amdgpu_sriov_vf(adev)) {
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32(reg_offset, sdma_cntl);
}
return 0;
}
static int sdma_v6_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
int instances, queue;
uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
struct amdgpu_mes_queue *queue;
mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
spin_lock(&adev->mes.queue_id_lock);
queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
if (queue) {
DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
amdgpu_fence_process(queue->ring);
}
spin_unlock(&adev->mes.queue_id_lock);
return 0;
}
queue = entry->ring_id & 0xf;
instances = (entry->ring_id & 0xf0) >> 4;
if (instances > 1) {
DRM_ERROR("IH: wrong ring_ID detected, as wrong sdma instance\n");
return -EINVAL;
}
switch (entry->client_id) {
case SOC21_IH_CLIENTID_GFX:
switch (queue) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[instances].ring);
break;
default:
break;
}
break;
}
return 0;
}
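/*
 * For illustration (not in the original file): the trap handler above
 * decodes entry->ring_id with the low nibble as the queue and the next
 * nibble as the SDMA instance, e.g.
 *
 *   ring_id = 0x01 -> queue = 0x01 & 0xf = 1, instance = (0x01 & 0xf0) >> 4 = 0
 *   ring_id = 0x10 -> queue = 0,              instance = 1
 *
 * Only queue 0 (the kernel ring) currently leads to fence processing.
 */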
static int sdma_v6_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
return 0;
}
static int sdma_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int sdma_v6_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static void sdma_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
}
const struct amd_ip_funcs sdma_v6_0_ip_funcs = {
.name = "sdma_v6_0",
.early_init = sdma_v6_0_early_init,
.late_init = NULL,
.sw_init = sdma_v6_0_sw_init,
.sw_fini = sdma_v6_0_sw_fini,
.hw_init = sdma_v6_0_hw_init,
.hw_fini = sdma_v6_0_hw_fini,
.suspend = sdma_v6_0_suspend,
.resume = sdma_v6_0_resume,
.is_idle = sdma_v6_0_is_idle,
.wait_for_idle = sdma_v6_0_wait_for_idle,
.soft_reset = sdma_v6_0_soft_reset,
.check_soft_reset = sdma_v6_0_check_soft_reset,
.set_clockgating_state = sdma_v6_0_set_clockgating_state,
.set_powergating_state = sdma_v6_0_set_powergating_state,
.get_clockgating_state = sdma_v6_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs sdma_v6_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.get_rptr = sdma_v6_0_ring_get_rptr,
.get_wptr = sdma_v6_0_ring_get_wptr,
.set_wptr = sdma_v6_0_ring_set_wptr,
.emit_frame_size =
5 + /* sdma_v6_0_ring_init_cond_exec */
6 + /* sdma_v6_0_ring_emit_hdp_flush */
6 + /* sdma_v6_0_ring_emit_pipeline_sync */
/* sdma_v6_0_ring_emit_vm_flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
10 + 10 + 10, /* sdma_v6_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 5 + 7 + 6, /* sdma_v6_0_ring_emit_ib */
.emit_ib = sdma_v6_0_ring_emit_ib,
.emit_mem_sync = sdma_v6_0_ring_emit_mem_sync,
.emit_fence = sdma_v6_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v6_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v6_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v6_0_ring_emit_hdp_flush,
.test_ring = sdma_v6_0_ring_test_ring,
.test_ib = sdma_v6_0_ring_test_ib,
.insert_nop = sdma_v6_0_ring_insert_nop,
.pad_ib = sdma_v6_0_ring_pad_ib,
.emit_wreg = sdma_v6_0_ring_emit_wreg,
.emit_reg_wait = sdma_v6_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v6_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v6_0_ring_init_cond_exec,
.patch_cond_exec = sdma_v6_0_ring_patch_cond_exec,
.preempt_ib = sdma_v6_0_ring_preempt_ib,
};
static void sdma_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v6_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
}
}
static const struct amdgpu_irq_src_funcs sdma_v6_0_trap_irq_funcs = {
.set = sdma_v6_0_set_trap_irq_state,
.process = sdma_v6_0_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v6_0_illegal_inst_irq_funcs = {
.process = sdma_v6_0_process_illegal_inst_irq,
};
static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v6_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v6_0_illegal_inst_irq_funcs;
}
/**
* sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine
*
* @ib: indirect buffer to fill with commands
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
* @tmz: if a secure copy should be used
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
* sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine
*
* @ib: indirect buffer to fill
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
* Fill GPU buffers using the DMA engine.
*/
static void sdma_v6_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
ib->ptr[ib->length_dw++] = byte_count - 1;
}
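/*
 * For illustration (not in the original file): the two packet emitters above
 * produce exactly the dword counts advertised in sdma_v6_0_buffer_funcs
 * below -- 7 dwords for a linear copy (header, count, parameters, 64-bit src,
 * 64-bit dst) and 5 dwords for a constant fill (header, 64-bit dst, data,
 * count).  byte_count is encoded as "count - 1", and the 0x400000
 * copy/fill_max_bytes limits correspond to a 4 MiB maximum per packet.
 */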
static const struct amdgpu_buffer_funcs sdma_v6_0_buffer_funcs = {
.copy_max_bytes = 0x400000,
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v6_0_emit_copy_buffer,
.fill_max_bytes = 0x400000,
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v6_0_emit_fill_buffer,
};
static void sdma_v6_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v6_0_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v6_0_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = sdma_v6_0_vm_copy_pte,
.write_pte = sdma_v6_0_vm_write_pte,
.set_pte_pde = sdma_v6_0_vm_set_pte_pde,
};
static void sdma_v6_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v6_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->vm_manager.vm_pte_scheds[i] =
&adev->sdma.instance[i].ring.sched;
}
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v6_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &sdma_v6_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors: Christian König <[email protected]>
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
#define VCE_V3_0_STACK_SIZE (64 * 1024)
#define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
| GRBM_GFX_INDEX__VCE_ALL_PIPE)
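/*
 * For illustration (not in the original file): GET_VCE_INSTANCE() selects a
 * single VCE instance while addressing all pipes, e.g.
 *
 *   GET_VCE_INSTANCE(0) = (0 << 4) | 0x07 = 0x07
 *   GET_VCE_INSTANCE(1) = (1 << 4) | 0x07 = 0x17
 *
 * while writing mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) appears to restore the
 * broadcast selection once a register access sequence is finished.
 */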
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vce_v3_0_wait_for_idle(void *handle);
static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
/**
* vce_v3_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 v;
mutex_lock(&adev->grbm_idx_mutex);
if (adev->vce.harvest_config == 0 ||
adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring->me == 0)
v = RREG32(mmVCE_RB_RPTR);
else if (ring->me == 1)
v = RREG32(mmVCE_RB_RPTR2);
else
v = RREG32(mmVCE_RB_RPTR3);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return v;
}
/**
* vce_v3_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 v;
mutex_lock(&adev->grbm_idx_mutex);
if (adev->vce.harvest_config == 0 ||
adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring->me == 0)
v = RREG32(mmVCE_RB_WPTR);
else if (ring->me == 1)
v = RREG32(mmVCE_RB_WPTR2);
else
v = RREG32(mmVCE_RB_WPTR3);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return v;
}
/**
* vce_v3_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
mutex_lock(&adev->grbm_idx_mutex);
if (adev->vce.harvest_config == 0 ||
adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else if (ring->me == 1)
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
}
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0);
}
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
bool gated)
{
u32 data;
/* Set Override to disable Clock Gating */
vce_v3_0_override_vce_clock_gating(adev, true);
/* This function enables MGCG which is controlled by firmware.
 * With the clocks in the gated state the core is still
 * accessible but the firmware will throttle the clocks on the
 * fly as necessary.
 */
if (!gated) {
data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff;
data &= ~0xef0000;
WREG32(mmVCE_CLOCK_GATING_B, data);
data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0x3ff000;
data &= ~0xffc00000;
WREG32(mmVCE_UENC_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x2;
data &= ~0x00010000;
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data |= 0x37f;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8;
WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
} else {
data = RREG32(mmVCE_CLOCK_GATING_B);
data &= ~0x80010;
data |= 0xe70008;
WREG32(mmVCE_CLOCK_GATING_B, data);
data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0xffc00000;
WREG32(mmVCE_UENC_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x10000;
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0x3ff;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8);
WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
}
vce_v3_0_override_vce_clock_gating(adev, false);
}
static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
{
int i, j;
for (i = 0; i < 10; ++i) {
for (j = 0; j < 100; ++j) {
uint32_t status = RREG32(mmVCE_STATUS);
if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
return 0;
mdelay(10);
}
DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
mdelay(10);
WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
mdelay(10);
}
return -ETIMEDOUT;
}
/**
* vce_v3_0_start - start VCE block
*
* @adev: amdgpu_device pointer
*
* Setup and start the VCE block
*/
static int vce_v3_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
int idx, r;
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
if (adev->vce.harvest_config & (1 << idx))
continue;
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
/* Program the instance 0 register space when both instances are present
 * or only instance 0 is available; program the instance 1 register space
 * only when instance 1 alone is available.
 */
if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
ring = &adev->vce.ring[0];
WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
ring = &adev->vce.ring[1];
WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
ring = &adev->vce.ring[2];
WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
}
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
else
WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
mdelay(100);
r = vce_v3_0_firmware_loaded(adev);
/* clear BUSY flag */
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0);
if (r) {
DRM_ERROR("VCE not responding, giving up!!!\n");
mutex_unlock(&adev->grbm_idx_mutex);
return r;
}
}
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
static int vce_v3_0_stop(struct amdgpu_device *adev)
{
int idx;
mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {
if (adev->vce.harvest_config & (1 << idx))
continue;
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
else
WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0);
/* hold on ECPU */
WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
/* clear VCE STATUS */
WREG32(mmVCE_STATUS, 0);
}
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
if ((adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY))
return AMDGPU_VCE_HARVEST_VCE1;
if (adev->flags & AMD_IS_APU)
tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
VCE_HARVEST_FUSE_MACRO__MASK) >>
VCE_HARVEST_FUSE_MACRO__SHIFT;
else
tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
switch (tmp) {
case 1:
return AMDGPU_VCE_HARVEST_VCE0;
case 2:
return AMDGPU_VCE_HARVEST_VCE1;
case 3:
return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
default:
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM))
return AMDGPU_VCE_HARVEST_VCE1;
return 0;
}
}
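/*
 * For illustration (not in the original file): the two-bit fuse value read
 * above maps onto the harvest configuration handled by the switch statement:
 *
 *   1 -> AMDGPU_VCE_HARVEST_VCE0
 *   2 -> AMDGPU_VCE_HARVEST_VCE1
 *   3 -> both instances harvested (early_init below then returns -ENOENT)
 *   other values -> nothing harvested, except on the listed Polaris/VegaM
 *                   parts which always report VCE1 as harvested
 */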
static int vce_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
if ((adev->vce.harvest_config &
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;
adev->vce.num_rings = 3;
vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);
return 0;
}
static int vce_v3_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int r, i;
/* VCE */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
if (r)
return r;
r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
if (r)
return r;
/* 52.8.3 required for 3 ring support */
if (adev->vce.fw_version < FW_52_8_3)
adev->vce.num_rings = 2;
r = amdgpu_vce_resume(adev);
if (r)
return r;
for (i = 0; i < adev->vce.num_rings; i++) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
hw_prio, NULL);
if (r)
return r;
}
r = amdgpu_vce_entity_init(adev);
return r;
}
static int vce_v3_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vce_suspend(adev);
if (r)
return r;
return amdgpu_vce_sw_fini(adev);
}
static int vce_v3_0_hw_init(void *handle)
{
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
vce_v3_0_override_vce_clock_gating(adev, true);
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
for (i = 0; i < adev->vce.num_rings; i++) {
r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
}
DRM_INFO("VCE initialized successfully.\n");
return 0;
}
static int vce_v3_0_hw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vce.idle_work);
r = vce_v3_0_wait_for_idle(handle);
if (r)
return r;
vce_v3_0_stop(adev);
return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
}
static int vce_v3_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
* - enable powergating
* - enable clockgating
* - disable dpm
*
* TODO: to align with the VCN implementation, move the
* jobs for clockgating/powergating/dpm setting to
* ->set_powergating_state().
*/
cancel_delayed_work_sync(&adev->vce.idle_work);
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
}
r = vce_v3_0_hw_fini(adev);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
static int vce_v3_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vce_resume(adev);
if (r)
return r;
return vce_v3_0_hw_init(adev);
}
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
uint32_t offset, size;
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
WREG32(mmVCE_CLOCK_GATING_B, 0x1FF);
WREG32(mmVCE_LMI_CTRL, 0x00398000);
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
WREG32(mmVCE_LMI_SWAP_CNTL, 0);
WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
WREG32(mmVCE_LMI_VM_CTRL, 0);
WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000);
if (adev->asic_type >= CHIP_STONEY) {
WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
} else
WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V3_0_FW_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
if (idx == 0) {
offset += size;
size = VCE_V3_0_STACK_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
offset += size;
size = VCE_V3_0_DATA_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
} else {
offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
size = VCE_V3_0_STACK_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
offset += size;
size = VCE_V3_0_DATA_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
}
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
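/*
 * For illustration (not in the original file): the VCPU cache windows
 * programmed above carve the shared VCE BO into one firmware image followed
 * by per-instance stack and data regions:
 *
 *   offset0 = AMDGPU_VCE_FIRMWARE_OFFSET,                        size0 = FW_SIZE
 *   idx 0: offset1 = offset0 + FW_SIZE,                          size1 = STACK_SIZE
 *          offset2 = offset1 + STACK_SIZE,                       size2 = DATA_SIZE
 *   idx 1: offset1 = offset0 + FW_SIZE + STACK_SIZE + DATA_SIZE, size1 = STACK_SIZE
 *          offset2 = offset1 + STACK_SIZE,                       size2 = DATA_SIZE
 *
 * which matches the FW_SIZE + (STACK_SIZE + DATA_SIZE) * 2 allocation made
 * in vce_v3_0_sw_init().
 */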
static bool vce_v3_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 mask = 0;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
return !(RREG32(mmSRBM_STATUS2) & mask);
}
static int vce_v3_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++)
if (vce_v3_0_is_idle(handle))
return 0;
return -ETIMEDOUT;
}
#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
static bool vce_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
/* According to the VCE team, we should use VCE_STATUS instead of the
 * SRBM_STATUS.VCE_BUSY bit for busy status checking.
 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
 * instance's registers are accessed
 * (0 for 1st instance, 10 for 2nd instance).
 *
 * VCE_STATUS
 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
 * |----+----+-----------+----+----+----+----------+---------+----|
 * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
 *
 * The VCE team suggests using bits 3--6 for the busy status check.
 */
mutex_lock(&adev->grbm_idx_mutex);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
adev->vce.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->vce.srbm_soft_reset = 0;
return false;
}
}
static int vce_v3_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
if (!adev->vce.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->vce.srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int vce_v3_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
return vce_v3_0_suspend(adev);
}
static int vce_v3_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
return vce_v3_0_resume(adev);
}
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
uint32_t val = 0;
if (state == AMDGPU_IRQ_STATE_ENABLE)
val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
return 0;
}
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: VCE\n");
WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1);
switch (entry->src_data[0]) {
case 0:
case 1:
case 2:
amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static int vce_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < 2; i++) {
/* Program VCE Instance 0 or 1 if not harvested */
if (adev->vce.harvest_config & (1 << i))
continue;
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
if (!enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
data &= ~(0xf | 0xff0);
data |= ((0x0 << 0) | (0x04 << 4));
WREG32(mmVCE_CLOCK_GATING_A, data);
/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
data = RREG32(mmVCE_UENC_CLOCK_GATING);
data &= ~(0xf | 0xff0);
data |= ((0x0 << 0) | (0x04 << 4));
WREG32(mmVCE_UENC_CLOCK_GATING, data);
}
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
static int vce_v3_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
* That's done in the dpm code via the SMC. This
* just re-inits the block as necessary. The actual
* gating still happens in the dpm code. We should
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
ret = vce_v3_0_stop(adev);
if (ret)
goto out;
} else {
ret = vce_v3_0_start(adev);
if (ret)
goto out;
}
out:
return ret;
}
static void vce_v3_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
mutex_lock(&adev->pm.mutex);
if (adev->flags & AMD_IS_APU)
data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
else
data = RREG32_SMC(ixCURRENT_PG_STATUS);
if (data & CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) {
DRM_INFO("Cannot get clockgating state when VCE is powergated.\n");
goto out;
}
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
/* AMD_CG_SUPPORT_VCE_MGCG */
data = RREG32(mmVCE_CLOCK_GATING_A);
if (data & (0x04 << 4))
*flags |= AMD_CG_SUPPORT_VCE_MGCG;
out:
mutex_unlock(&adev->pm.mutex);
}
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, pd_addr >> 12);
amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, VCE_CMD_END);
}
static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, VCE_CMD_WAIT_GE);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, seq);
}
static const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
.late_init = NULL,
.sw_init = vce_v3_0_sw_init,
.sw_fini = vce_v3_0_sw_fini,
.hw_init = vce_v3_0_hw_init,
.hw_fini = vce_v3_0_hw_fini,
.suspend = vce_v3_0_suspend,
.resume = vce_v3_0_resume,
.is_idle = vce_v3_0_is_idle,
.wait_for_idle = vce_v3_0_wait_for_idle,
.check_soft_reset = vce_v3_0_check_soft_reset,
.pre_soft_reset = vce_v3_0_pre_soft_reset,
.soft_reset = vce_v3_0_soft_reset,
.post_soft_reset = vce_v3_0_post_soft_reset,
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
.get_clockgating_state = vce_v3_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.type = AMDGPU_RING_TYPE_VCE,
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
.emit_frame_size =
4 + /* vce_v3_0_emit_pipeline_sync */
6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
};
static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCE,
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = vce_v3_0_ring_get_rptr,
.get_wptr = vce_v3_0_ring_get_wptr,
.set_wptr = vce_v3_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
6 + /* vce_v3_0_emit_vm_flush */
4 + /* vce_v3_0_emit_pipeline_sync */
6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
.emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
};
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
if (adev->asic_type >= CHIP_STONEY) {
for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
adev->vce.ring[i].me = i;
}
DRM_INFO("VCE enabled in VM mode\n");
} else {
for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
adev->vce.ring[i].me = i;
}
DRM_INFO("VCE enabled in physical mode\n");
}
}
static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
.set = vce_v3_0_set_interrupt_state,
.process = vce_v3_0_process_interrupt,
};
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}
const struct amdgpu_ip_block_version vce_v3_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 3,
.minor = 0,
.rev = 0,
.funcs = &vce_v3_0_ip_funcs,
};
const struct amdgpu_ip_block_version vce_v3_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 3,
.minor = 1,
.rev = 0,
.funcs = &vce_v3_0_ip_funcs,
};
const struct amdgpu_ip_block_version vce_v3_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 3,
.minor = 4,
.rev = 0,
.funcs = &vce_v3_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/vce_v3_0.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "smuio_v11_0.h"
#include "smuio/smuio_11_0_0_offset.h"
#include "smuio/smuio_11_0_0_sh_mask.h"
static u32 smuio_v11_0_get_rom_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
}
static u32 smuio_v11_0_get_rom_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
}
static void smuio_v11_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
{
u32 def, data;
/* enabling/disabling ROM CG is not supported on APU */
if (adev->flags & AMD_IS_APU)
return;
if (!(adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
return;
def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
if (enable)
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
if (def != data)
WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
}
static void smuio_v11_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *flags)
{
u32 data;
/* CGTT_ROM_CLK_CTRL0 is not available for APU */
if (adev->flags & AMD_IS_APU)
return;
data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}
const struct amdgpu_smuio_funcs smuio_v11_0_funcs = {
.get_rom_index_offset = smuio_v11_0_get_rom_index_offset,
.get_rom_data_offset = smuio_v11_0_get_rom_data_offset,
.update_rom_clock_gating = smuio_v11_0_update_rom_clock_gating,
.get_clock_gating_state = smuio_v11_0_get_clock_gating_state,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
RESET_WAVES
};
enum {
MAX_TRAPID = 8, /* 3 bits in the bitfield. */
MAX_WATCH_ADDRESSES = 4
};
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
mutex_lock(&adev->srbm_mutex);
WREG32(mmSRBM_GFX_CNTL, value);
}
static void unlock_srbm(struct amdgpu_device *adev)
{
WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(adev, mec, pipe, queue_id, 0);
}
static void release_queue(struct amdgpu_device *adev)
{
unlock_srbm(adev);
}
static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases, uint32_t inst)
{
lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
unlock_srbm(adev);
}
static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid, uint32_t inst)
{
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
* a mapping is in progress or because a mapping finished and the
* SW cleared it. So the protocol is to always wait & clear.
*/
uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
ATC_VMID0_PASID_MAPPING__VALID_MASK;
WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);
while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
cpu_relax();
WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
/* Mapping vmid to pasid also for IH block */
WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);
return 0;
}
static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(adev, mec, pipe, 0, 0);
WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
unlock_srbm(adev);
return 0;
}
static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
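/*
 * For illustration (not in the original file): the offset computed above is
 *
 *   engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_CIK_SDMA_QUEUE_OFFSET
 *
 * so SDMA engine 1 / RLC queue 2, for example, yields
 * SDMA1_REGISTER_OFFSET + 2 * KFD_CIK_SDMA_QUEUE_OFFSET, which the callers
 * below then add to the mmSDMA0_RLC0_* register offsets.
 */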
static inline struct cik_mqd *get_mqd(void *mqd)
{
return (struct cik_mqd *)mqd;
}
static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
return (struct cik_sdma_rlc_registers *)mqd;
}
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr, uint32_t wptr_shift,
uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
bool valid_wptr = false;
m = get_mqd(mqd);
acquire_queue(adev, pipe_id, queue_id);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
/* Copy userspace write pointer value to register.
* Activate doorbell logic to monitor subsequent changes.
*/
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
/* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
*/
release_queue(adev);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
acquire_queue(adev, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32(mmCP_HQD_ACTIVE, data);
release_queue(adev);
return 0;
}
static int kgd_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do { \
if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
break; \
(*dump)[i][0] = (addr) << 2; \
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
acquire_queue(adev, pipe_id, queue_id);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
DUMP_REG(reg);
release_queue(adev);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
return 0;
}
static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct cik_sdma_rlc_registers *m;
unsigned long end_jiffies;
uint32_t sdma_rlc_reg_offset;
uint32_t data;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
}
usleep_range(500, 1000);
}
data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
m->sdma_rlc_rb_rptr);
if (read_user_wptr(mm, wptr, data))
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
else
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdma_rlc_rb_rptr);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)
*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
DUMP_REG(sdma_offset + reg);
for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
reg++)
DUMP_REG(sdma_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
return 0;
}
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
uint32_t low, high;
acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
if (low == RREG32(mmCP_HQD_PQ_BASE) &&
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
release_queue(adev);
return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
struct cik_sdma_rlc_registers *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
return false;
}
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id, uint32_t inst)
{
uint32_t temp;
enum hqd_dequeue_request_type type;
unsigned long flags, end_jiffies;
int retry;
if (amdgpu_in_reset(adev))
return -EIO;
acquire_queue(adev, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
switch (reset_type) {
case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
type = DRAIN_PIPE;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
}
/* Workaround: If IQ timer is active and the wait time is close to or
* equal to 0, dequeueing is not safe. Wait until either the wait time
* is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
* cleared before continuing. Also, ensure wait times are set to at
* least 0x3.
*/
local_irq_save(flags);
preempt_disable();
retry = 5000; /* wait for 500 usecs at maximum */
while (true) {
temp = RREG32(mmCP_HQD_IQ_TIMER);
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
pr_debug("HW is processing IQ\n");
goto loop;
}
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
== 3) /* SEM-rearm is safe */
break;
/* Wait time 3 is safe for CP, but our MMIO read/write
* time is close to 1 microsecond, so check for 10 to
* leave more buffer room
*/
if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
>= 10)
break;
pr_debug("IQ timer is active\n");
} else
break;
loop:
if (!retry) {
pr_err("CP HQD IQ timer status time out\n");
break;
}
ndelay(100);
--retry;
}
retry = 1000;
while (true) {
temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
break;
pr_debug("Dequeue request is pending\n");
if (!retry) {
pr_err("CP HQD dequeue request time out\n");
break;
}
ndelay(100);
--retry;
}
local_irq_restore(flags);
preempt_enable();
WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out\n");
release_queue(adev);
return -ETIME;
}
usleep_range(500, 1000);
}
release_queue(adev);
return 0;
}
static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
struct cik_sdma_rlc_registers *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
}
usleep_range(500, 1000);
}
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd, uint32_t inst)
{
uint32_t data;
mutex_lock(&adev->grbm_idx_mutex);
WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
WREG32(mmSQ_CMD, sq_cmd);
/* Restore the GRBM_GFX_INDEX register */
data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
WREG32(mmGRBM_GFX_INDEX, data);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
static void set_scratch_backing_va(struct amdgpu_device *adev,
uint64_t va, uint32_t vmid)
{
lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
unlock_srbm(adev);
}
static void set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID\n");
return;
}
WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
lower_32_bits(page_table_base));
}
/**
* read_vmid_from_vmfault_reg - read vmid from register
*
* @adev: amdgpu_device pointer
*
* Read the vmid from the VM fault status register (CIK).
*/
static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
{
uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
.wave_control_execute = kgd_wave_control_execute,
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "smuio_v9_0.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
static u32 smuio_v9_0_get_rom_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
}
static u32 smuio_v9_0_get_rom_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
}
static void smuio_v9_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
{
u32 def, data;
/* enabling/disabling ROM CG is not supported on APU */
if (adev->flags & AMD_IS_APU)
return;
def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
if (def != data)
WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
}
static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *flags)
{
u32 data;
/* CGTT_ROM_CLK_CTRL0 is not available for APUs */
if (adev->flags & AMD_IS_APU)
return;
data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0);
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}
const struct amdgpu_smuio_funcs smuio_v9_0_funcs = {
.get_rom_index_offset = smuio_v9_0_get_rom_index_offset,
.get_rom_data_offset = smuio_v9_0_get_rom_data_offset,
.update_rom_clock_gating = smuio_v9_0_update_rom_clock_gating,
.get_clock_gating_state = smuio_v9_0_get_clock_gating_state,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c |
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include "amdgpu.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/acpi.h>
/*
* BIOS.
*/
#define AMD_VBIOS_SIGNATURE " 761295520"
#define AMD_VBIOS_SIGNATURE_OFFSET 0x30
#define AMD_VBIOS_SIGNATURE_SIZE sizeof(AMD_VBIOS_SIGNATURE)
#define AMD_VBIOS_SIGNATURE_END (AMD_VBIOS_SIGNATURE_OFFSET + AMD_VBIOS_SIGNATURE_SIZE)
#define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA)
#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9)
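/*
 * These macros reflect the standard PCI expansion ROM header: bytes 0-1 carry
 * the 0x55AA signature and byte 2 gives the image length in 512-byte units
 * (hence the << 9).  A minimal, illustrative check (not part of the driver
 * flow; parse_rom() is a hypothetical consumer):
 *
 *	if (AMD_IS_VALID_VBIOS(buf) && AMD_VBIOS_LENGTH(buf) <= buf_size)
 *		parse_rom(buf, AMD_VBIOS_LENGTH(buf));
 */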
/* Check if current bios is an ATOM BIOS.
* Return true if it is ATOM BIOS. Otherwise, return false.
*/
static bool check_atom_bios(uint8_t *bios, size_t size)
{
uint16_t tmp, bios_header_start;
if (!bios || size < 0x49) {
DRM_INFO("vbios mem is null or mem size is wrong\n");
return false;
}
if (!AMD_IS_VALID_VBIOS(bios)) {
DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
return false;
}
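	/* bytes 0x48-0x49 hold a little-endian pointer to the ATOM ROM header;
	 * the "ATOM" (or byte-swapped "MOTA") signature sits four bytes past it.
	 */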
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
DRM_INFO("Can't locate bios header\n");
return false;
}
tmp = bios_header_start + 4;
if (size < tmp) {
DRM_INFO("BIOS header is broken\n");
return false;
}
if (!memcmp(bios + tmp, "ATOM", 4) ||
!memcmp(bios + tmp, "MOTA", 4)) {
DRM_DEBUG("ATOMBIOS detected\n");
return true;
}
return false;
}
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
*/
static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
{
uint8_t __iomem *bios;
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMD_IS_APU))
if (amdgpu_device_need_post(adev))
return false;
/* FB BAR not enabled */
if (pci_resource_len(adev->pdev, 0) == 0)
return false;
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
bios = ioremap_wc(vram_base, size);
if (!bios)
return false;
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
iounmap(bios);
return false;
}
adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
if (!check_atom_bios(adev->bios, size)) {
kfree(adev->bios);
return false;
}
return true;
}
bool amdgpu_read_bios(struct amdgpu_device *adev)
{
uint8_t __iomem *bios;
size_t size;
adev->bios = NULL;
/* XXX: some cards may return 0 for rom size? ddx has a workaround */
bios = pci_map_rom(adev->pdev, &size);
if (!bios)
return false;
adev->bios = kzalloc(size, GFP_KERNEL);
if (adev->bios == NULL) {
pci_unmap_rom(adev->pdev, bios);
return false;
}
adev->bios_size = size;
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
if (!check_atom_bios(adev->bios, size)) {
kfree(adev->bios);
return false;
}
return true;
}
static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
{
u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
int len;
if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom)
return false;
/* validate VBIOS signature */
if (amdgpu_asic_read_bios_from_rom(adev, &header[0], sizeof(header)) == false)
return false;
header[AMD_VBIOS_SIGNATURE_END] = 0;
if ((!AMD_IS_VALID_VBIOS(header)) ||
memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
AMD_VBIOS_SIGNATURE,
strlen(AMD_VBIOS_SIGNATURE)) != 0)
return false;
/* valid vbios, go on */
len = AMD_VBIOS_LENGTH(header);
len = ALIGN(len, 4);
adev->bios = kmalloc(len, GFP_KERNEL);
if (!adev->bios) {
DRM_ERROR("no memory to allocate for BIOS\n");
return false;
}
adev->bios_size = len;
/* read complete BIOS */
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
if (!check_atom_bios(adev->bios, len)) {
kfree(adev->bios);
return false;
}
return true;
}
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
phys_addr_t rom = adev->pdev->rom;
size_t romlen = adev->pdev->romlen;
void __iomem *bios;
adev->bios = NULL;
if (!rom || romlen == 0)
return false;
adev->bios = kzalloc(romlen, GFP_KERNEL);
if (!adev->bios)
return false;
bios = ioremap(rom, romlen);
if (!bios)
goto free_bios;
memcpy_fromio(adev->bios, bios, romlen);
iounmap(bios);
if (!check_atom_bios(adev->bios, romlen))
goto free_bios;
adev->bios_size = romlen;
return true;
free_bios:
kfree(adev->bios);
return false;
}
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
*/
/* retrieve the ROM in 4k blocks */
#define ATRM_BIOS_PAGE 4096
/**
* amdgpu_atrm_call - fetch a chunk of the vbios
*
* @atrm_handle: acpi ATRM handle
* @bios: vbios image pointer
* @offset: offset of vbios image data to fetch
* @len: length of vbios image data to fetch
*
* Executes ATRM to fetch a chunk of the discrete
* vbios image on PX systems (all asics).
* Returns the length of the buffer fetched.
*/
static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object atrm_arg_elements[2], *obj;
struct acpi_object_list atrm_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
atrm_arg.count = 2;
atrm_arg.pointer = &atrm_arg_elements[0];
atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[0].integer.value = offset;
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[1].integer.value = len;
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
DRM_ERROR("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
len = obj->buffer.length;
kfree(buffer.pointer);
return len;
}
static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
{
int ret;
int size = 256 * 1024;
int i;
struct pci_dev *pdev = NULL;
acpi_handle dhandle, atrm_handle;
acpi_status status;
bool found = false;
/* ATRM is for the discrete card only */
if (adev->flags & AMD_IS_APU)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
if (ACPI_SUCCESS(status)) {
found = true;
break;
}
}
if (!found) {
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
if (ACPI_SUCCESS(status)) {
found = true;
break;
}
}
}
if (!found)
return false;
pci_dev_put(pdev);
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
dev_err(adev->dev, "Unable to allocate bios\n");
return false;
}
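	/* ATRM hands the image back one page at a time; a short read marks the
	 * end of the ROM.
	 */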
for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
ret = amdgpu_atrm_call(atrm_handle,
adev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
if (ret < ATRM_BIOS_PAGE)
break;
}
if (!check_atom_bios(adev->bios, size)) {
kfree(adev->bios);
return false;
}
adev->bios_size = size;
return true;
}
#else
static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
{
return false;
}
#endif
static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return igp_read_bios_from_vram(adev);
else
return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
false : amdgpu_asic_read_disabled_bios(adev);
}
#ifdef CONFIG_ACPI
static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
{
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
unsigned int offset;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
dev_info(adev->dev, "ACPI VFCT table present but broken (too short #1),skipping\n");
return false;
}
vfct = (UEFI_ACPI_VFCT *)hdr;
offset = vfct->VBIOSImageOffset;
while (offset < tbl_size) {
GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + offset);
VFCT_IMAGE_HEADER *vhdr = &vbios->VbiosHeader;
offset += sizeof(VFCT_IMAGE_HEADER);
if (offset > tbl_size) {
dev_info(adev->dev, "ACPI VFCT image header truncated,skipping\n");
return false;
}
offset += vhdr->ImageLength;
if (offset > tbl_size) {
dev_info(adev->dev, "ACPI VFCT image truncated,skipping\n");
return false;
}
if (vhdr->ImageLength &&
vhdr->PCIBus == adev->pdev->bus->number &&
vhdr->PCIDevice == PCI_SLOT(adev->pdev->devfn) &&
vhdr->PCIFunction == PCI_FUNC(adev->pdev->devfn) &&
vhdr->VendorID == adev->pdev->vendor &&
vhdr->DeviceID == adev->pdev->device) {
adev->bios = kmemdup(&vbios->VbiosContent,
vhdr->ImageLength,
GFP_KERNEL);
if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
kfree(adev->bios);
return false;
}
adev->bios_size = vhdr->ImageLength;
return true;
}
}
dev_info(adev->dev, "ACPI VFCT table present but broken (too short #2),skipping\n");
return false;
}
#else
static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
{
return false;
}
#endif
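/*
 * Try the possible VBIOS sources in order of preference; the first method
 * that yields a valid ATOM image wins.
 */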
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
if (amdgpu_atrm_get_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ATRM\n");
goto success;
}
if (amdgpu_acpi_vfct_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
goto success;
}
if (igp_read_bios_from_vram(adev)) {
dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
goto success;
}
if (amdgpu_read_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
goto success;
}
if (amdgpu_read_bios_from_rom(adev)) {
dev_info(adev->dev, "Fetched VBIOS from ROM\n");
goto success;
}
if (amdgpu_read_disabled_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from disabled ROM BAR\n");
goto success;
}
if (amdgpu_read_platform_bios(adev)) {
dev_info(adev->dev, "Fetched VBIOS from platform\n");
goto success;
}
dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
return false;
success:
adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
return true;
}
/* helper function for soc15 and onwards to read bios from rom */
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
u32 *dw_ptr;
u32 i, length_dw;
u32 rom_offset;
u32 rom_index_offset;
u32 rom_data_offset;
if (bios == NULL)
return false;
if (length_bytes == 0)
return false;
/* APU vbios image is part of sbios image */
if (adev->flags & AMD_IS_APU)
return false;
if (!adev->smuio.funcs ||
!adev->smuio.funcs->get_rom_index_offset ||
!adev->smuio.funcs->get_rom_data_offset)
return false;
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
rom_index_offset =
adev->smuio.funcs->get_rom_index_offset(adev);
rom_data_offset =
adev->smuio.funcs->get_rom_data_offset(adev);
if (adev->nbio.funcs &&
adev->nbio.funcs->get_rom_offset) {
rom_offset = adev->nbio.funcs->get_rom_offset(adev);
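		/* NBIO reports the ROM offset in 128 KB units; convert to bytes */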
rom_offset = rom_offset << 17;
} else {
rom_offset = 0;
}
/* set rom index to rom_offset */
WREG32(rom_index_offset, rom_offset);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(rom_data_offset);
return true;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c |
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*
*/
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
#define ATOM_MAX_HW_I2C_WRITE 3
#define ATOM_MAX_HW_I2C_READ 255
static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
u8 slave_addr, u8 flags,
u8 *buf, u8 num)
{
struct drm_device *dev = chan->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
u16 out = cpu_to_le16(0);
int r = 0;
memset(&args, 0, sizeof(args));
mutex_lock(&chan->mutex);
base = (unsigned char *)adev->mode_info.atom_context->scratch;
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
r = -EINVAL;
goto done;
}
if (buf == NULL)
args.ucRegIndex = 0;
else
args.ucRegIndex = buf[0];
if (num)
num--;
if (num) {
if (buf) {
memcpy(&out, &buf[1], num);
} else {
DRM_ERROR("hw i2c: missing buf with num > 1\n");
r = -EINVAL;
goto done;
}
}
args.lpI2CDataOut = cpu_to_le16(out);
} else {
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
args.ucFlag = flags;
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
r = -EIO;
goto done;
}
if (!(flags & HW_I2C_WRITE))
amdgpu_atombios_copy_swap(buf, base, num, false);
done:
mutex_unlock(&chan->mutex);
return r;
}
int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret;
u8 flags;
/* check for bus probe */
p = &msgs[0];
if ((num == 1) && (p->len == 0)) {
ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE,
NULL, 0);
if (ret)
return ret;
else
return num;
}
for (i = 0; i < num; i++) {
p = &msgs[i];
remaining = p->len;
buffer_offset = 0;
/* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */
if (p->flags & I2C_M_RD) {
max_bytes = ATOM_MAX_HW_I2C_READ;
flags = HW_I2C_READ;
} else {
max_bytes = ATOM_MAX_HW_I2C_WRITE;
flags = HW_I2C_WRITE;
}
while (remaining) {
if (remaining > max_bytes)
current_count = max_bytes;
else
current_count = remaining;
ret = amdgpu_atombios_i2c_process_i2c_ch(i2c,
p->addr, flags,
&p->buf[buffer_offset], current_count);
if (ret)
return ret;
remaining -= current_count;
buffer_offset += current_count;
}
}
return num;
}
u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
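/*
 * A minimal sketch of how these callbacks would typically be wired into an
 * i2c_algorithm (the struct name here is illustrative; the actual
 * registration lives elsewhere in the driver):
 *
 *	static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
 *		.master_xfer = amdgpu_atombios_i2c_xfer,
 *		.functionality = amdgpu_atombios_i2c_func,
 *	};
 */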
void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
{
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
args.ucRegIndex = offset;
args.lpI2CDataOut = data;
args.ucFlag = 1;
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
args.ucTransBytes = 1;
args.ucSlaveAddr = slave_addr;
args.ucLineNumber = line_number;
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/atombios_i2c.c |
/*
* Copyright 2013 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors: Christian König <[email protected]>
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define VCE_V2_0_FW_SIZE (256 * 1024)
#define VCE_V2_0_STACK_SIZE (64 * 1024)
#define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
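/*
 * The single VCE BO is laid out as firmware image, stack and per-handle data
 * back to back; vce_v2_0_mc_resume() programs these three regions as VCPU
 * cache windows 0, 1 and 2.
 */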
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
/**
* vce_v2_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->me == 0)
return RREG32(mmVCE_RB_RPTR);
else
return RREG32(mmVCE_RB_RPTR2);
}
/**
* vce_v2_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->me == 0)
return RREG32(mmVCE_RB_WPTR);
else
return RREG32(mmVCE_RB_WPTR2);
}
/**
* vce_v2_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->me == 0)
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
else
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
int i, j;
for (i = 0; i < 10; ++i) {
for (j = 0; j < 100; ++j) {
uint32_t status = RREG32(mmVCE_LMI_STATUS);
if (status & 0x337f)
return 0;
mdelay(10);
}
}
return -ETIMEDOUT;
}
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
int i, j;
for (i = 0; i < 10; ++i) {
for (j = 0; j < 100; ++j) {
uint32_t status = RREG32(mmVCE_STATUS);
if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
return 0;
mdelay(10);
}
DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
WREG32_P(mmVCE_SOFT_RESET,
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
mdelay(10);
WREG32_P(mmVCE_SOFT_RESET, 0,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
mdelay(10);
}
return -ETIMEDOUT;
}
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
u32 tmp;
tmp = RREG32(mmVCE_CLOCK_GATING_A);
tmp &= ~0xfff;
tmp |= ((0 << 0) | (4 << 4));
tmp |= 0x40000;
WREG32(mmVCE_CLOCK_GATING_A, tmp);
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
tmp &= ~0xfff;
tmp |= ((0 << 0) | (4 << 4));
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
tmp = RREG32(mmVCE_CLOCK_GATING_B);
tmp |= 0x10;
tmp &= ~0x100000;
WREG32(mmVCE_CLOCK_GATING_B, tmp);
}
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t size, offset;
WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
WREG32(mmVCE_CLOCK_GATING_B, 0xf7);
WREG32(mmVCE_LMI_CTRL, 0x00398000);
WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
WREG32(mmVCE_LMI_SWAP_CNTL, 0);
WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
WREG32(mmVCE_LMI_VM_CTRL, 0);
WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V2_0_FW_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
offset += size;
size = VCE_V2_0_STACK_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
offset += size;
size = VCE_V2_0_DATA_SIZE;
WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}
static bool vce_v2_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}
static int vce_v2_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
unsigned i;
for (i = 0; i < adev->usec_timeout; i++) {
if (vce_v2_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
}
/**
* vce_v2_0_start - start VCE block
*
* @adev: amdgpu_device pointer
*
* Setup and start the VCE block
*/
static int vce_v2_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
int r;
/* set BUSY flag */
WREG32_P(mmVCE_STATUS, 1, ~1);
vce_v2_0_init_cg(adev);
vce_v2_0_disable_cg(adev);
vce_v2_0_mc_resume(adev);
ring = &adev->vce.ring[0];
WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
ring = &adev->vce.ring[1];
WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
mdelay(100);
WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);
r = vce_v2_0_firmware_loaded(adev);
/* clear BUSY flag */
WREG32_P(mmVCE_STATUS, 0, ~1);
if (r) {
DRM_ERROR("VCE not responding, giving up!!!\n");
return r;
}
return 0;
}
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
int i;
int status;
if (vce_v2_0_lmi_clean(adev)) {
DRM_INFO("vce is not idle \n");
return 0;
}
if (vce_v2_0_wait_for_idle(adev)) {
DRM_INFO("VCE is busy, Can't set clock gating");
return 0;
}
/* Stall UMC and register bus before resetting VCPU */
WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
for (i = 0; i < 100; ++i) {
status = RREG32(mmVCE_LMI_STATUS);
if (status & 0x240)
break;
mdelay(1);
}
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);
/* put LMI, VCPU, RBC etc... into reset */
WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);
WREG32(mmVCE_STATUS, 0);
return 0;
}
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
u32 tmp;
if (gated) {
tmp = RREG32(mmVCE_CLOCK_GATING_B);
tmp |= 0xe70000;
WREG32(mmVCE_CLOCK_GATING_B, tmp);
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
tmp |= 0xff000000;
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
tmp &= ~0x3fc;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
} else {
tmp = RREG32(mmVCE_CLOCK_GATING_B);
tmp |= 0xe7;
tmp &= ~0xe70000;
WREG32(mmVCE_CLOCK_GATING_B, tmp);
tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
tmp |= 0x1fe000;
tmp &= ~0xff000000;
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
tmp |= 0x3fc;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
}
}
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
u32 orig, tmp;
/* LMI_MC/LMI_UMC always set in dynamic,
* set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
*/
tmp = RREG32(mmVCE_CLOCK_GATING_B);
tmp &= ~0x00060006;
	/* the ECPU, IH, SEM and SYS blocks are exceptions and need to be turned on/off by SW */
if (gated) {
tmp |= 0xe10000;
WREG32(mmVCE_CLOCK_GATING_B, tmp);
} else {
tmp |= 0xe1;
tmp &= ~0xe10000;
WREG32(mmVCE_CLOCK_GATING_B, tmp);
}
orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
tmp &= ~0x1fe000;
tmp &= ~0xff000000;
if (tmp != orig)
WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
tmp &= ~0x3fc;
if (tmp != orig)
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);
	if (gated)
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
bool sw_cg)
{
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
if (sw_cg)
vce_v2_0_set_sw_cg(adev, true);
else
vce_v2_0_set_dyn_cg(adev, true);
} else {
vce_v2_0_disable_cg(adev);
if (sw_cg)
vce_v2_0_set_sw_cg(adev, false);
else
vce_v2_0_set_dyn_cg(adev, false);
}
}
static int vce_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->vce.num_rings = 2;
vce_v2_0_set_ring_funcs(adev);
vce_v2_0_set_irq_funcs(adev);
return 0;
}
static int vce_v2_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* VCE */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
if (r)
return r;
r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
if (r)
return r;
r = amdgpu_vce_resume(adev);
if (r)
return r;
for (i = 0; i < adev->vce.num_rings; i++) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
hw_prio, NULL);
if (r)
return r;
}
r = amdgpu_vce_entity_init(adev);
return r;
}
static int vce_v2_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vce_suspend(adev);
if (r)
return r;
return amdgpu_vce_sw_fini(adev);
}
static int vce_v2_0_hw_init(void *handle)
{
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
vce_v2_0_enable_mgcg(adev, true, false);
for (i = 0; i < adev->vce.num_rings; i++) {
r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
}
DRM_INFO("VCE initialized successfully.\n");
return 0;
}
static int vce_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vce.idle_work);
return 0;
}
static int vce_v2_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
* - enable powergating
* - enable clockgating
* - disable dpm
*
* TODO: to align with the VCN implementation, move the
* jobs for clockgating/powergating/dpm setting to
* ->set_powergating_state().
*/
cancel_delayed_work_sync(&adev->vce.idle_work);
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
}
r = vce_v2_0_hw_fini(adev);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
static int vce_v2_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vce_resume(adev);
if (r)
return r;
return vce_v2_0_hw_init(adev);
}
static int vce_v2_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
mdelay(5);
return vce_v2_0_start(adev);
}
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
uint32_t val = 0;
if (state == AMDGPU_IRQ_STATE_ENABLE)
val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
return 0;
}
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: VCE\n");
switch (entry->src_data[0]) {
case 0:
case 1:
amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static int vce_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
bool sw_cg = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (state == AMD_CG_STATE_GATE) {
gate = true;
sw_cg = true;
}
vce_v2_0_enable_mgcg(adev, gate, sw_cg);
return 0;
}
static int vce_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
* That's done in the dpm code via the SMC. This
* just re-inits the block as necessary. The actual
* gating still happens in the dpm code. We should
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (state == AMD_PG_STATE_GATE)
return vce_v2_0_stop(adev);
else
return vce_v2_0_start(adev);
}
static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
.late_init = NULL,
.sw_init = vce_v2_0_sw_init,
.sw_fini = vce_v2_0_sw_fini,
.hw_init = vce_v2_0_hw_init,
.hw_fini = vce_v2_0_hw_fini,
.suspend = vce_v2_0_suspend,
.resume = vce_v2_0_resume,
.is_idle = vce_v2_0_is_idle,
.wait_for_idle = vce_v2_0_wait_for_idle,
.soft_reset = vce_v2_0_soft_reset,
.set_clockgating_state = vce_v2_0_set_clockgating_state,
.set_powergating_state = vce_v2_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_VCE,
.align_mask = 0xf,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = vce_v2_0_ring_get_rptr,
.get_wptr = vce_v2_0_ring_get_wptr,
.set_wptr = vce_v2_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs,
.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
adev->vce.ring[i].me = i;
}
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
.set = vce_v2_0_set_interrupt_state,
.process = vce_v2_0_process_interrupt,
};
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
};
const struct amdgpu_ip_block_version vce_v2_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 2,
.minor = 0,
.rev = 0,
.funcs = &vce_v2_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/vce_v2_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König <[email protected]>
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
* uvd_v5_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32(mmUVD_RBC_RB_RPTR);
}
/**
* uvd_v5_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32(mmUVD_RBC_RB_WPTR);
}
/**
* uvd_v5_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
static int uvd_v5_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
uvd_v5_0_set_ring_funcs(adev);
uvd_v5_0_set_irq_funcs(adev);
return 0;
}
static int uvd_v5_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
r = amdgpu_uvd_sw_init(adev);
if (r)
return r;
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
return r;
}
static int uvd_v5_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
return amdgpu_uvd_sw_fini(adev);
}
/**
* uvd_v5_0_hw_init - start and test UVD block
*
* @handle: handle used to pass amdgpu_device pointer
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
static int uvd_v5_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
goto done;
}
tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
/* Clear timeout status bits */
amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
amdgpu_ring_write(ring, 0x8);
amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
done:
if (!r)
DRM_INFO("UVD initialized successfully.\n");
return r;
}
/**
* uvd_v5_0_hw_fini - stop the hardware block
*
* @handle: handle used to pass amdgpu_device pointer
*
* Stop the UVD block, mark ring as not ready any more
*/
static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->uvd.idle_work);
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
return 0;
}
static int uvd_v5_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
* - enable powergating
* - enable clockgating
* - disable dpm
*
* TODO: to align with the VCN implementation, move the
* jobs for clockgating/powergating/dpm setting to
* ->set_powergating_state().
*/
cancel_delayed_work_sync(&adev->uvd.idle_work);
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
}
r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
static int uvd_v5_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
return uvd_v5_0_hw_init(adev);
}
/**
* uvd_v5_0_mc_resume - memory controller programming
*
* @adev: amdgpu_device pointer
*
 * Let the UVD memory controller know its offsets
*/
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
uint64_t offset;
uint32_t size;
/* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
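	/* cache window offsets are programmed in 8-byte units, hence the >> 3 */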
WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
size = AMDGPU_UVD_STACK_SIZE +
(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
/**
* uvd_v5_0_start - start UVD block
*
* @adev: amdgpu_device pointer
*
* Setup and start the UVD block
*/
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl;
int i, j, r;
	/* disable DPG */
WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
/* disable byte swapping */
lmi_swap_cntl = 0;
mp_swap_cntl = 0;
uvd_v5_0_mc_resume(adev);
	/* disable interrupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
/* stall UMC and register bus before resetting VCPU */
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
/* take UVD block out of reset */
WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
mdelay(5);
/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
(1 << 21) | (1 << 9) | (1 << 20));
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
lmi_swap_cntl = 0xa;
mp_swap_cntl = 0;
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
WREG32(mmUVD_MPC_SET_ALU, 0);
WREG32(mmUVD_MPC_SET_MUX, 0x88);
/* take all subblocks out of reset, except VCPU */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 1 << 9);
/* enable UMC */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
mdelay(10);
for (i = 0; i < 10; ++i) {
uint32_t status;
for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_STATUS);
if (status & 2)
break;
mdelay(10);
}
r = 0;
if (status & 2)
break;
DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(10);
WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(10);
r = -1;
}
if (r) {
DRM_ERROR("UVD not responding, giving up!!!\n");
return r;
}
/* enable master interrupt */
WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
/* clear the bit 4 of UVD_STATUS */
WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
rb_bufsz = order_base_2(ring->ring_size);
tmp = 0;
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
/* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, tmp);
/* set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32(mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
return 0;
}
/**
* uvd_v5_0_stop - stop UVD block
*
* @adev: amdgpu_device pointer
*
* stop the UVD block
*/
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
/* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
/* Stall UMC and register bus before resetting VCPU */
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
mdelay(1);
/* put VCPU into reset */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
/* disable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 0x0);
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
WREG32(mmUVD_STATUS, 0);
}
/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Write a fence and a trap command to the ring.
*/
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 2);
}
/**
* uvd_v5_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
*
* Test if we can successfully write to the context register
*/
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
unsigned i;
int r;
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
/**
* uvd_v5_0_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job to retrieve vmid from
* @ib: indirect buffer to execute
* @flags: unused
*
* Write ring commands to execute the indirect buffer
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
static bool uvd_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
static int uvd_v5_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
return 0;
}
return -ETIMEDOUT;
}
static int uvd_v5_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
uvd_v5_0_stop(adev);
WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
mdelay(5);
return uvd_v5_0_start(adev);
}
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
// TODO
return 0;
}
static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: UVD TRAP\n");
amdgpu_fence_process(&adev->uvd.inst->ring);
return 0;
}
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t data1, data3, suvd_flags;
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
data3 = RREG32(mmUVD_CGC_GATE);
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
if (enable) {
data3 |= (UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__JPEG_MASK |
UVD_CGC_GATE__SCPU_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
data3 |= UVD_CGC_GATE__VCPU_MASK;
data3 &= ~UVD_CGC_GATE__REGS_MASK;
data1 |= suvd_flags;
} else {
data3 = 0;
data1 = 0;
}
WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_CGC_GATE, data3);
}
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data2;
data = RREG32(mmUVD_CGC_CTRL);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
WREG32(mmUVD_CGC_CTRL, data);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, cgc_flags, suvd_flags;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
cgc_flags = UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK;
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= cgc_flags;
data1 |= suvd_flags;
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data |= 0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
if (orig != data)
WREG32(mmUVD_CGC_CTRL, data);
} else {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data &= ~0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
if (orig != data)
WREG32(mmUVD_CGC_CTRL, data);
}
}
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
return -EBUSY;
uvd_v5_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
/* uvd_v5_0_set_hw_clock_gating(adev); */
} else {
uvd_v5_0_enable_clock_gating(adev, false);
}
uvd_v5_0_set_sw_clock_gating(adev);
return 0;
}
static int uvd_v5_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
* That's done in the dpm code via the SMC. This
* just re-inits the block as necessary. The actual
* gating still happens in the dpm code. We should
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
if (state == AMD_PG_STATE_GATE) {
uvd_v5_0_stop(adev);
} else {
ret = uvd_v5_0_start(adev);
if (ret)
goto out;
}
out:
return ret;
}
static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
mutex_lock(&adev->pm.mutex);
if (RREG32_SMC(ixCURRENT_PG_STATUS) &
CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
goto out;
}
/* AMD_CG_SUPPORT_UVD_MGCG */
data = RREG32(mmUVD_CGC_CTRL);
if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
*flags |= AMD_CG_SUPPORT_UVD_MGCG;
out:
mutex_unlock(&adev->pm.mutex);
}
static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
.sw_init = uvd_v5_0_sw_init,
.sw_fini = uvd_v5_0_sw_fini,
.hw_init = uvd_v5_0_hw_init,
.hw_fini = uvd_v5_0_hw_fini,
.suspend = uvd_v5_0_suspend,
.resume = uvd_v5_0_resume,
.is_idle = uvd_v5_0_is_idle,
.wait_for_idle = uvd_v5_0_wait_for_idle,
.soft_reset = uvd_v5_0_soft_reset,
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
.get_clockgating_state = uvd_v5_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr,
.set_wptr = uvd_v5_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
.emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
.emit_ib = uvd_v5_0_ring_emit_ib,
.emit_fence = uvd_v5_0_ring_emit_fence,
.test_ring = uvd_v5_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = uvd_v5_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
};
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
.set = uvd_v5_0_set_interrupt_state,
.process = uvd_v5_0_process_interrupt,
};
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->uvd.inst->irq.num_types = 1;
adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
}
const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 5,
.minor = 0,
.rev = 0,
.funcs = &uvd_v5_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c |
/*
* Copyright 2011 Red Hat Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Jerome Glisse <[email protected]>
*/
/* Algorithm:
*
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what is after "last" is the oldest bo we allocated and thus
 * the first one that should no longer be in use by the GPU.
 *
 * If that's not the case we skip over the bo after "last" to the closest
 * done bo, if such a one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fences of all
 * rings. We just wait for any of those fences to complete.
*/
#include "amdgpu.h"
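/**
 * amdgpu_sa_bo_manager_init - set up a suballocation manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to initialize
 * @size: size of the backing buffer in bytes
 * @suballoc_align: required alignment for suballocations
 * @domain: memory domain to place the backing buffer in
 *
 * Allocates a single kernel BO of @size bytes in @domain, clears it and
 * initializes the shared DRM suballocation manager on top of it.
 * Returns 0 on success or a negative error code on failure.
 */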
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
unsigned int size, u32 suballoc_align, u32 domain)
{
int r;
r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain,
&sa_manager->bo, &sa_manager->gpu_addr,
&sa_manager->cpu_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
memset(sa_manager->cpu_ptr, 0, size);
drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align);
return r;
}
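/**
 * amdgpu_sa_bo_manager_fini - tear down a suballocation manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: manager to tear down
 *
 * Finalizes the DRM suballocation manager and frees its backing kernel BO.
 */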
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager)
{
if (sa_manager->bo == NULL) {
dev_err(adev->dev, "no bo for sa manager\n");
return;
}
drm_suballoc_manager_fini(&sa_manager->base);
amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
}
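/**
 * amdgpu_sa_bo_new - allocate a suballocation
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: pointer to the resulting suballocation, set to NULL on failure
 * @size: number of bytes to allocate
 *
 * Carves @size bytes out of the manager's backing buffer; this may block
 * waiting for fences of older suballocations to complete.
 * Returns 0 on success or a negative error code on failure.
 */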
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct drm_suballoc **sa_bo,
unsigned int size)
{
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
GFP_KERNEL, false, 0);
if (IS_ERR(sa)) {
*sa_bo = NULL;
return PTR_ERR(sa);
}
*sa_bo = sa;
return 0;
}
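/**
 * amdgpu_sa_bo_free - free a suballocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: suballocation to free, set to NULL on return
 * @fence: fence after which the backing memory may be reused
 */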
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
struct dma_fence *fence)
{
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
drm_suballoc_free(*sa_bo, fence);
*sa_bo = NULL;
}
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
struct drm_printer p = drm_seq_file_printer(m);
drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c |
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
#include "amd_pcie.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
struct amdgpu_gpu_instance *gpu_instance;
int i;
mutex_lock(&mgpu_info.mutex);
for (i = 0; i < mgpu_info.num_gpu; i++) {
gpu_instance = &(mgpu_info.gpu_ins[i]);
if (gpu_instance->adev == adev) {
mgpu_info.gpu_ins[i] =
mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
mgpu_info.num_gpu--;
if (adev->flags & AMD_IS_APU)
mgpu_info.num_apu--;
else
mgpu_info.num_dgpu--;
break;
}
}
mutex_unlock(&mgpu_info.mutex);
}
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
*
* @dev: drm dev pointer
*
* This is the main unload function for KMS (all asics).
*/
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
if (adev == NULL)
return;
amdgpu_unregister_gpu_instance(adev);
if (adev->rmmio == NULL)
return;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
DRM_WARN("smart shift update failed\n");
amdgpu_acpi_fini(adev);
amdgpu_device_fini_hw(adev);
}
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
struct amdgpu_gpu_instance *gpu_instance;
mutex_lock(&mgpu_info.mutex);
if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
DRM_ERROR("Cannot register more gpu instance\n");
mutex_unlock(&mgpu_info.mutex);
return;
}
gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
gpu_instance->adev = adev;
gpu_instance->mgpu_fan_enabled = 0;
mgpu_info.num_gpu++;
if (adev->flags & AMD_IS_APU)
mgpu_info.num_apu++;
else
mgpu_info.num_dgpu++;
mutex_unlock(&mgpu_info.mutex);
}
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
* @adev: pointer to struct amdgpu_device
* @flags: device flags
*
* This is the main load function for KMS (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
struct drm_device *dev;
int r, acpi_status;
dev = adev_to_drm(adev);
	/* amdgpu_device_init() should report only fatal errors
	 * (e.g. memory allocation, iomapping or memory manager
	 * initialization failure); it must properly initialize the
	 * GPU MC controller and permit VRAM allocation.
	 */
r = amdgpu_device_init(adev, flags);
if (r) {
dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
if (amdgpu_device_supports_px(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
} else if (amdgpu_device_supports_boco(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else if (amdgpu_device_supports_baco(dev) &&
(amdgpu_runtime_pm != 0)) {
switch (adev->asic_type) {
case CHIP_VEGA20:
case CHIP_ARCTURUS:
/* enable BACO as runpm mode if runpm=1 */
if (amdgpu_runtime_pm > 0)
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
case CHIP_VEGA10:
/* enable BACO as runpm mode if noretry=0 */
if (!adev->gmc.noretry)
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
default:
/* enable BACO as runpm mode on CI+ */
adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
break;
}
if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
dev_info(adev->dev, "Using BACO for runtime pm\n");
}
	/* Calling ACPI methods requires modeset init,
	 * but failure here is not fatal
	 */
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
dev_dbg(dev->dev, "Error during ACPI methods call\n");
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
DRM_WARN("smart shift update failed\n");
out:
if (r)
amdgpu_driver_unload_kms(dev);
return r;
}
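/* Translate an AMDGPU_INFO_FW_* query into the version and feature level
 * of the corresponding firmware. Returns -EINVAL for unknown firmware
 * types or out-of-range instance indices.
 */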
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
struct drm_amdgpu_query_fw *query_fw,
struct amdgpu_device *adev)
{
switch (query_fw->fw_type) {
case AMDGPU_INFO_FW_VCE:
fw_info->ver = adev->vce.fw_version;
fw_info->feature = adev->vce.fb_version;
break;
case AMDGPU_INFO_FW_UVD:
fw_info->ver = adev->uvd.fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_VCN:
fw_info->ver = adev->vcn.fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_GMC:
fw_info->ver = adev->gmc.fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_GFX_ME:
fw_info->ver = adev->gfx.me_fw_version;
fw_info->feature = adev->gfx.me_feature_version;
break;
case AMDGPU_INFO_FW_GFX_PFP:
fw_info->ver = adev->gfx.pfp_fw_version;
fw_info->feature = adev->gfx.pfp_feature_version;
break;
case AMDGPU_INFO_FW_GFX_CE:
fw_info->ver = adev->gfx.ce_fw_version;
fw_info->feature = adev->gfx.ce_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLC:
fw_info->ver = adev->gfx.rlc_fw_version;
fw_info->feature = adev->gfx.rlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
fw_info->ver = adev->gfx.rlc_srlc_fw_version;
fw_info->feature = adev->gfx.rlc_srlc_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
fw_info->ver = adev->gfx.rlc_srlg_fw_version;
fw_info->feature = adev->gfx.rlc_srlg_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
fw_info->ver = adev->gfx.rlc_srls_fw_version;
fw_info->feature = adev->gfx.rlc_srls_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLCP:
fw_info->ver = adev->gfx.rlcp_ucode_version;
fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
break;
case AMDGPU_INFO_FW_GFX_RLCV:
fw_info->ver = adev->gfx.rlcv_ucode_version;
fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
break;
case AMDGPU_INFO_FW_GFX_MEC:
if (query_fw->index == 0) {
fw_info->ver = adev->gfx.mec_fw_version;
fw_info->feature = adev->gfx.mec_feature_version;
} else if (query_fw->index == 1) {
fw_info->ver = adev->gfx.mec2_fw_version;
fw_info->feature = adev->gfx.mec2_feature_version;
} else
return -EINVAL;
break;
case AMDGPU_INFO_FW_SMC:
fw_info->ver = adev->pm.fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_TA:
switch (query_fw->index) {
case TA_FW_TYPE_PSP_XGMI:
fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
fw_info->feature = adev->psp.xgmi_context.context
.bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_RAS:
fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
fw_info->feature = adev->psp.ras_context.context
.bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_HDCP:
fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
fw_info->feature = adev->psp.hdcp_context.context
.bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_DTM:
fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
fw_info->feature = adev->psp.dtm_context.context
.bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_RAP:
fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
fw_info->feature = adev->psp.rap_context.context
.bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
fw_info->feature =
adev->psp.securedisplay_context.context.bin_desc
.feature_version;
break;
default:
return -EINVAL;
}
break;
case AMDGPU_INFO_FW_SDMA:
if (query_fw->index >= adev->sdma.num_instances)
return -EINVAL;
fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
break;
case AMDGPU_INFO_FW_SOS:
fw_info->ver = adev->psp.sos.fw_version;
fw_info->feature = adev->psp.sos.feature_version;
break;
case AMDGPU_INFO_FW_ASD:
fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
break;
case AMDGPU_INFO_FW_DMCU:
fw_info->ver = adev->dm.dmcu_fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_DMCUB:
fw_info->ver = adev->dm.dmcub_fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_TOC:
fw_info->ver = adev->psp.toc.fw_version;
fw_info->feature = adev->psp.toc.feature_version;
break;
case AMDGPU_INFO_FW_CAP:
fw_info->ver = adev->psp.cap_fw_version;
fw_info->feature = adev->psp.cap_feature_version;
break;
case AMDGPU_INFO_FW_MES_KIQ:
fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK)
>> AMDGPU_MES_FEAT_VERSION_SHIFT;
break;
case AMDGPU_INFO_FW_MES:
fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK)
>> AMDGPU_MES_FEAT_VERSION_SHIFT;
break;
case AMDGPU_INFO_FW_IMU:
fw_info->ver = adev->gfx.imu_fw_version;
fw_info->feature = 0;
break;
default:
return -EINVAL;
}
return 0;
}
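/* Fill @result with the capabilities of the requested hardware IP:
 * the number of usable rings, IB alignment requirements, IP version
 * numbers and, where available, the IP discovery version.
 */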
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
struct drm_amdgpu_info *info,
struct drm_amdgpu_info_hw_ip *result)
{
uint32_t ib_start_alignment = 0;
uint32_t ib_size_alignment = 0;
enum amd_ip_block_type type;
unsigned int num_rings = 0;
unsigned int i, j;
if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
return -EINVAL;
switch (info->query_hw_ip.type) {
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
if (adev->gfx.gfx_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
if (adev->gfx.compute_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
if (adev->sdma.instance[i].ring.sched.ready)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
if (adev->uvd.harvest_config & (1 << i))
continue;
if (adev->uvd.inst[i].ring.sched.ready)
++num_rings;
}
ib_start_alignment = 64;
ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
if (adev->vce.ring[i].sched.ready)
++num_rings;
ib_start_alignment = 4;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
if (adev->uvd.harvest_config & (1 << i))
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
if (adev->uvd.inst[i].ring_enc[j].sched.ready)
++num_rings;
}
ib_start_alignment = 64;
ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
if (adev->vcn.inst[i].ring_dec.sched.ready)
++num_rings;
}
ib_start_alignment = 16;
ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
for (j = 0; j < adev->vcn.num_enc_rings; j++)
if (adev->vcn.inst[i].ring_enc[j].sched.ready)
++num_rings;
}
ib_start_alignment = 64;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
if (adev->jpeg.inst[i].ring_dec[j].sched.ready)
++num_rings;
}
ib_start_alignment = 16;
ib_size_alignment = 16;
break;
default:
return -EINVAL;
}
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].version->type == type &&
adev->ip_blocks[i].status.valid)
break;
if (i == adev->num_ip_blocks)
return 0;
num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
num_rings);
result->hw_ip_version_major = adev->ip_blocks[i].version->major;
result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
if (adev->asic_type >= CHIP_VEGA10) {
switch (type) {
case AMD_IP_BLOCK_TYPE_GFX:
result->ip_discovery_version = adev->ip_versions[GC_HWIP][0];
break;
case AMD_IP_BLOCK_TYPE_SDMA:
result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0];
break;
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_JPEG:
result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0];
break;
case AMD_IP_BLOCK_TYPE_VCE:
result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0];
break;
default:
result->ip_discovery_version = 0;
break;
}
} else {
result->ip_discovery_version = 0;
}
result->capabilities_flags = 0;
result->available_rings = (1 << num_rings) - 1;
result->ib_start_alignment = ib_start_alignment;
result->ib_size_alignment = ib_size_alignment;
return 0;
}
/*
* Userspace get information ioctl
*/
/**
* amdgpu_info_ioctl - answer a device specific request.
*
* @dev: drm device pointer
* @data: request object
* @filp: drm filp
*
* This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
* etc. (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
uint32_t size = info->return_size;
struct drm_crtc *crtc;
uint32_t ui32 = 0;
uint64_t ui64 = 0;
int i, found;
int ui32_size = sizeof(ui32);
if (!info->return_size || !info->return_pointer)
return -EINVAL;
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
case AMDGPU_INFO_CRTC_FROM_ID:
for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
crtc = (struct drm_crtc *)minfo->crtcs[i];
if (crtc && crtc->base.id == info->mode_crtc.id) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
ui32 = amdgpu_crtc->crtc_id;
found = 1;
break;
}
}
if (!found) {
DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
return -EINVAL;
}
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
case AMDGPU_INFO_HW_IP_INFO: {
struct drm_amdgpu_info_hw_ip ip = {};
int ret;
ret = amdgpu_hw_ip_info(adev, info, &ip);
if (ret)
return ret;
ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
return ret ? -EFAULT : 0;
}
case AMDGPU_INFO_HW_IP_COUNT: {
enum amd_ip_block_type type;
uint32_t count = 0;
switch (info->query_hw_ip.type) {
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
break;
case AMDGPU_HW_IP_VCN_DEC:
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
break;
case AMDGPU_HW_IP_VCN_JPEG:
type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
break;
default:
return -EINVAL;
}
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].version->type == type &&
adev->ip_blocks[i].status.valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;
return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_TIMESTAMP:
ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_FW_VERSION: {
struct drm_amdgpu_info_firmware fw_info;
int ret;
/* We only support one instance of each IP block right now. */
if (info->query_fw.ip_instance != 0)
return -EINVAL;
ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
if (ret)
return ret;
return copy_to_user(out, &fw_info,
min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
}
case AMDGPU_INFO_NUM_BYTES_MOVED:
ui64 = atomic64_read(&adev->num_bytes_moved);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_NUM_EVICTIONS:
ui64 = atomic64_read(&adev->num_evictions);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
memset(&gds_info, 0, sizeof(gds_info));
gds_info.compute_partition_size = adev->gds.gds_size;
gds_info.gds_total_size = adev->gds.gds_size;
gds_info.gws_per_compute_partition = adev->gds.gws_size;
gds_info.oa_per_compute_partition = adev->gds.oa_size;
return copy_to_user(out, &gds_info,
min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
}
case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
vram_gtt.vram_cpu_accessible_size =
min(adev->gmc.visible_vram_size -
atomic64_read(&adev->visible_pin_size),
vram_gtt.vram_size);
vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
struct ttm_resource_manager *gtt_man =
&adev->mman.gtt_mgr.manager;
struct ttm_resource_manager *vram_man =
&adev->mman.vram_mgr.manager;
memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
ttm_resource_manager_usage(vram_man);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
adev->gmc.visible_vram_size;
mem.cpu_accessible_vram.usable_heap_size =
min(adev->gmc.visible_vram_size -
atomic64_read(&adev->visible_pin_size),
mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
mem.gtt.total_heap_size = gtt_man->size;
mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
min((size_t)size, sizeof(mem)))
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
unsigned int n, alloc_size;
uint32_t *regs;
unsigned int se_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SE_INDEX_MASK;
unsigned int sh_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK;
/* set full masks if the userspace set all bits
* in the bitfields
*/
if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
se_num = 0xffffffff;
else if (se_num >= AMDGPU_GFX_MAX_SE)
return -EINVAL;
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
return -EINVAL;
if (info->read_mmr_reg.count > 128)
return -EINVAL;
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
amdgpu_gfx_off_ctrl(adev, false);
for (i = 0; i < info->read_mmr_reg.count; i++) {
if (amdgpu_asic_read_register(adev, se_num, sh_num,
info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
DRM_DEBUG_KMS("unallowed offset %#x\n",
info->read_mmr_reg.dword_offset + i);
kfree(regs);
amdgpu_gfx_off_ctrl(adev, true);
return -EFAULT;
}
}
amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
uint32_t pcie_gen_mask;
int ret;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
if (!dev_info)
return -ENOMEM;
dev_info->device_id = adev->pdev->device;
dev_info->chip_rev = adev->rev_id;
dev_info->external_rev = adev->external_rev_id;
dev_info->pci_rev = adev->pdev->revision;
dev_info->family = adev->family;
dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
/* return all clocks in KHz */
dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
if (adev->pm.dpm_enabled) {
dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
} else {
dev_info->max_engine_clock =
dev_info->min_engine_clock =
adev->clock.default_sclk * 10;
dev_info->max_memory_clock =
dev_info->min_memory_clock =
adev->clock.default_mclk * 10;
}
dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines;
dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
if (adev->gfx.mcbp)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
if (amdgpu_is_tmz(adev))
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
		/* Older VCE FW versions are buggy and can handle only 40 bits */
if (adev->vce.fw_version &&
adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
vm_size = min(vm_size, 1ULL << 40);
dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info->virtual_address_max =
min(vm_size, AMDGPU_GMC_HOLE_START);
if (vm_size > AMDGPU_GMC_HOLE_START) {
dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
}
dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->cu_active_number = adev->gfx.cu_info.number;
dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info->ce_ram_size = adev->gfx.ce_ram_size;
memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(dev_info->cu_bitmap));
dev_info->vram_type = adev->gmc.vram_type;
dev_info->vram_bit_width = adev->gmc.vram_width;
dev_info->vce_harvest_config = adev->vce.harvest_config;
dev_info->gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
if (adev->family >= AMDGPU_FAMILY_NV)
dev_info->pa_sc_tile_steering_override =
adev->gfx.config.pa_sc_tile_steering_override;
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
/* Combine the chip gen mask with the platform (CPU/mobo) mask. */
pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
dev_info->pcie_gen = fls(pcie_gen_mask);
dev_info->pcie_num_lanes =
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
adev->gfx.config.gc_gl1c_per_sa;
dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
dev_info->mall_size = adev->gmc.mall_size;
if (adev->gfx.funcs->get_gfx_shadow_info) {
struct amdgpu_gfx_shadow_info shadow_info;
ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info);
if (!ret) {
dev_info->shadow_size = shadow_info.shadow_size;
dev_info->shadow_alignment = shadow_info.shadow_alignment;
dev_info->csa_size = shadow_info.csa_size;
dev_info->csa_alignment = shadow_info.csa_alignment;
}
}
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
return ret;
}
case AMDGPU_INFO_VCE_CLOCK_TABLE: {
unsigned int i;
struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
struct amd_vce_state *vce_state;
for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
if (vce_state) {
vce_clk_table.entries[i].sclk = vce_state->sclk;
vce_clk_table.entries[i].mclk = vce_state->mclk;
vce_clk_table.entries[i].eclk = vce_state->evclk;
vce_clk_table.num_valid_entries++;
}
}
return copy_to_user(out, &vce_clk_table,
min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
}
case AMDGPU_INFO_VBIOS: {
uint32_t bios_size = adev->bios_size;
switch (info->vbios_info.type) {
case AMDGPU_INFO_VBIOS_SIZE:
return copy_to_user(out, &bios_size,
min((size_t)size, sizeof(bios_size)))
? -EFAULT : 0;
case AMDGPU_INFO_VBIOS_IMAGE: {
uint8_t *bios;
uint32_t bios_offset = info->vbios_info.offset;
if (bios_offset >= bios_size)
return -EINVAL;
bios = adev->bios + bios_offset;
return copy_to_user(out, bios,
min((size_t)size, (size_t)(bios_size - bios_offset)))
? -EFAULT : 0;
}
case AMDGPU_INFO_VBIOS_INFO: {
struct drm_amdgpu_info_vbios vbios_info = {};
struct atom_context *atom_context;
atom_context = adev->mode_info.atom_context;
if (atom_context) {
memcpy(vbios_info.name, atom_context->name,
sizeof(atom_context->name));
memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
sizeof(atom_context->vbios_pn));
vbios_info.version = atom_context->version;
memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
sizeof(atom_context->vbios_ver_str));
memcpy(vbios_info.date, atom_context->date,
sizeof(atom_context->date));
}
return copy_to_user(out, &vbios_info,
min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
}
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->vbios_info.type);
return -EINVAL;
}
}
case AMDGPU_INFO_NUM_HANDLES: {
struct drm_amdgpu_info_num_handles handle;
switch (info->query_hw_ip.type) {
case AMDGPU_HW_IP_UVD:
			/* Starting with Polaris, we support unlimited UVD handles */
if (adev->asic_type < CHIP_POLARIS10) {
handle.uvd_max_handles = adev->uvd.max_handles;
handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);
return copy_to_user(out, &handle,
min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
} else {
return -ENODATA;
}
break;
default:
return -EINVAL;
}
}
case AMDGPU_INFO_SENSOR: {
if (!adev->pm.dpm_enabled)
return -ENOENT;
switch (info->sensor_info.type) {
case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GFX_SCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GFX_MCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
case AMDGPU_INFO_SENSOR_GPU_TEMP:
/* get temperature in millidegrees C */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
break;
case AMDGPU_INFO_SENSOR_GPU_LOAD:
/* get GPU load */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_LOAD,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
break;
case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
/* get average GPU power */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_GPU_AVG_POWER,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 >>= 8;
break;
case AMDGPU_INFO_SENSOR_VDDNB:
/* get VDDNB in millivolts */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_VDDNB,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
break;
case AMDGPU_INFO_SENSOR_VDDGFX:
/* get VDDGFX in millivolts */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_VDDGFX,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
break;
case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK:
			/* get peak pstate sclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK:
			/* get peak pstate mclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type);
return -EINVAL;
}
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_VRAM_LOST_COUNTER:
ui32 = atomic_read(&adev->vram_lost_counter);
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
uint64_t ras_mask;
if (!ras)
return -EINVAL;
ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;
return copy_to_user(out, &ras_mask,
min_t(u64, size, sizeof(ras_mask))) ?
-EFAULT : 0;
}
case AMDGPU_INFO_VIDEO_CAPS: {
const struct amdgpu_video_codecs *codecs;
struct drm_amdgpu_info_video_caps *caps;
int r;
if (!adev->asic_funcs->query_video_codecs)
return -EINVAL;
switch (info->video_cap.type) {
case AMDGPU_INFO_VIDEO_CAPS_DECODE:
r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
if (r)
return -EINVAL;
break;
case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
if (r)
return -EINVAL;
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->video_cap.type);
return -EINVAL;
}
caps = kzalloc(sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
for (i = 0; i < codecs->codec_count; i++) {
int idx = codecs->codec_array[i].codec_type;
switch (idx) {
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
caps->codec_info[idx].valid = 1;
caps->codec_info[idx].max_width =
codecs->codec_array[i].max_width;
caps->codec_info[idx].max_height =
codecs->codec_array[i].max_height;
caps->codec_info[idx].max_pixels_per_frame =
codecs->codec_array[i].max_pixels_per_frame;
caps->codec_info[idx].max_level =
codecs->codec_array[i].max_level;
break;
default:
break;
}
}
r = copy_to_user(out, caps,
min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
kfree(caps);
return r;
}
case AMDGPU_INFO_MAX_IBS: {
uint32_t max_ibs[AMDGPU_HW_IP_NUM];
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
max_ibs[i] = amdgpu_ring_max_ibs(i);
return copy_to_user(out, max_ibs,
min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0;
}
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
}
return 0;
}
/*
* Outdated mess for old drm with Xorg being in charge (void function now).
*/
/**
* amdgpu_driver_lastclose_kms - drm callback for last close
*
* @dev: drm dev pointer
*
* Switch vga_switcheroo state after last close (all asics).
*/
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
drm_fb_helper_lastclose(dev);
vga_switcheroo_process_delayed_switch();
}
/**
* amdgpu_driver_open_kms - drm callback for open
*
* @dev: drm dev pointer
* @file_priv: drm file
*
* On device open, init vm on cayman+ (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv;
int r, pasid;
/* Ensure IB tests are run on ring */
flush_delayed_work(&adev->delayed_init_work);
if (amdgpu_ras_intr_triggered()) {
DRM_ERROR("RAS Intr triggered, device disabled!!");
return -EHWPOISON;
}
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
goto pm_put;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
r = -ENOMEM;
goto out_suspend;
}
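	/* Allocate a PASID of up to 16 bits; fall back to 0 (no PASID)
	 * if the PASID space is exhausted.
	 */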
pasid = amdgpu_pasid_alloc(16);
if (pasid < 0) {
dev_warn(adev->dev, "No more PASIDs available!");
pasid = 0;
}
r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
if (r)
goto error_pasid;
r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
if (r)
goto error_pasid;
r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
if (r)
goto error_vm;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
r = -ENOMEM;
goto error_vm;
}
if (adev->gfx.mcbp) {
uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
if (r)
goto error_vm;
}
mutex_init(&fpriv->bo_list_lock);
idr_init_base(&fpriv->bo_list_handles, 1);
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
goto out_suspend;
error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
if (pasid) {
amdgpu_pasid_free(pasid);
amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
}
kfree(fpriv);
out_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_put:
pm_runtime_put_autosuspend(dev->dev);
return r;
}
/**
* amdgpu_driver_postclose_kms - drm callback for post close
*
* @dev: drm dev pointer
* @file_priv: drm file
*
* On device post close, tear down vm on cayman+ (all asics).
*/
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_bo_list *list;
struct amdgpu_bo *pd;
u32 pasid;
int handle;
if (!fpriv)
return;
pm_runtime_get_sync(dev->dev);
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
amdgpu_uvd_free_handles(adev, file_priv);
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
amdgpu_vce_free_handles(adev, file_priv);
if (fpriv->csa_va) {
uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
fpriv->csa_va, csa_addr));
fpriv->csa_va = NULL;
}
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.bo);
if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
amdgpu_vm_bo_del(adev, fpriv->prt_va);
amdgpu_bo_unreserve(pd);
}
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
amdgpu_bo_list_put(list);
idr_destroy(&fpriv->bo_list_handles);
mutex_destroy(&fpriv->bo_list_lock);
kfree(fpriv);
file_priv->driver_priv = NULL;
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
void amdgpu_driver_release_kms(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
amdgpu_device_fini_sw(adev);
pci_set_drvdata(adev->pdev, NULL);
}
/*
* VBlank related functions.
*/
/**
* amdgpu_get_vblank_counter_kms - get frame count
*
* @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
struct amdgpu_device *adev = drm_to_adev(dev);
int vpos, hpos, stat;
u32 count;
if (pipe >= adev->mode_info.num_crtc) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
/* The hw increments its frame counter at start of vsync, not at start
* of vblank, as is required by DRM core vblank counter handling.
* Cook the hw count here to make it appear to the caller as if it
* incremented at start of vblank. We measure distance to start of
* vblank in vpos. vpos therefore will be >= 0 between start of vblank
* and start of vsync, so vpos >= 0 means to bump the hw frame counter
* result by 1 to give the proper appearance to caller.
*/
if (adev->mode_info.crtcs[pipe]) {
/* Repeat readout if needed to provide stable result if
* we cross start of vsync during the queries.
*/
do {
count = amdgpu_display_vblank_get_counter(adev, pipe);
/* Ask amdgpu_display_get_crtc_scanoutpos to return
* vpos as distance to start of vblank, instead of
* regular vertical scanout pos.
*/
stat = amdgpu_display_get_crtc_scanoutpos(
dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
&adev->mode_info.crtcs[pipe]->base.hwmode);
} while (count != amdgpu_display_vblank_get_counter(adev, pipe));
if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
} else {
DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
pipe, vpos);
/* Bump counter if we are at >= leading edge of vblank,
* but before vsync where vpos would turn negative and
* the hw counter really increments.
*/
if (vpos >= 0)
count++;
}
} else {
/* Fallback to use value as is. */
count = amdgpu_display_vblank_get_counter(adev, pipe);
DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
}
return count;
}
/**
* amdgpu_enable_vblank_kms - enable vblank interrupt
*
* @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
struct amdgpu_device *adev = drm_to_adev(dev);
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}
/**
* amdgpu_disable_vblank_kms - disable vblank interrupt
*
* @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
struct amdgpu_device *adev = drm_to_adev(dev);
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
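/* Dump version and feature information for the firmwares known to the
 * driver, one line per firmware, followed by the VBIOS part number.
 */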
static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = m->private;
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret, i;
static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
#define TA_FW_NAME(type)[TA_FW_TYPE_PSP_##type] = #type
TA_FW_NAME(XGMI),
TA_FW_NAME(RAS),
TA_FW_NAME(HDCP),
TA_FW_NAME(DTM),
TA_FW_NAME(RAP),
TA_FW_NAME(SECUREDISPLAY),
#undef TA_FW_NAME
};
/* VCE */
query_fw.fw_type = AMDGPU_INFO_FW_VCE;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* UVD */
query_fw.fw_type = AMDGPU_INFO_FW_UVD;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* GMC */
query_fw.fw_type = AMDGPU_INFO_FW_GMC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* ME */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* PFP */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* CE */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* RLC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* RLC SAVE RESTORE LIST CNTL */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* RLC SAVE RESTORE LIST GPM MEM */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* RLC SAVE RESTORE LIST SRM MEM */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* RLCP */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* RLCV */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* MEC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
query_fw.index = 0;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* MEC2 */
if (adev->gfx.mec2_fw) {
query_fw.index = 1;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
}
/* IMU */
query_fw.fw_type = AMDGPU_INFO_FW_IMU;
query_fw.index = 0;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* PSP SOS */
query_fw.fw_type = AMDGPU_INFO_FW_SOS;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* PSP ASD */
query_fw.fw_type = AMDGPU_INFO_FW_ASD;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
query_fw.fw_type = AMDGPU_INFO_FW_TA;
for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
query_fw.index = i;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
continue;
seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
ta_fw_name[i], fw_info.feature, fw_info.ver);
}
/* SMC */
query_fw.fw_type = AMDGPU_INFO_FW_SMC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
smu_program = (fw_info.ver >> 24) & 0xff;
smu_major = (fw_info.ver >> 16) & 0xff;
smu_minor = (fw_info.ver >> 8) & 0xff;
smu_debug = (fw_info.ver >> 0) & 0xff;
seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);
/* SDMA */
query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++) {
query_fw.index = i;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
i, fw_info.feature, fw_info.ver);
}
/* VCN */
query_fw.fw_type = AMDGPU_INFO_FW_VCN;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* DMCU */
query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* DMCUB */
query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* TOC */
query_fw.fw_type = AMDGPU_INFO_FW_TOC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* CAP */
if (adev->psp.cap_fw) {
query_fw.fw_type = AMDGPU_INFO_FW_CAP;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
}
/* MES_KIQ */
query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
/* MES */
query_fw.fw_type = AMDGPU_INFO_FW_MES;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);
#endif
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
debugfs_create_file("amdgpu_firmware_info", 0444, root,
adev, &amdgpu_debugfs_firmware_info_fops);
#endif
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v13_0_4.h"
#include "mp/mp_13_0_4_offset.h"
#include "mp/mp_13_0_4_sh_mask.h"
MODULE_FIRMWARE("amdgpu/psp_13_0_4_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_4_ta.bin");
static int psp_v13_0_4_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
char ucode_prefix[30];
int err = 0;
amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 4):
err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
default:
BUG();
}
return 0;
}
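/* The PSP sets C2PMSG_81 to a non-zero value once the secure OS is up;
 * use that register as a sign-of-life check.
 */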
static bool psp_v13_0_4_is_sos_alive(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
uint32_t sol_reg;
sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
return sol_reg != 0x0;
}
static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
for (retry_loop = 0; retry_loop < 10; retry_loop++) {
		/* Wait for the bootloader to signal that it is
		 * ready by setting bit 31 of C2PMSG_35 to 1
		 */
ret = psp_wait_for(psp,
SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000,
0x80000000,
false);
if (ret == 0)
return 0;
}
return ret;
}
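/* Generic bootloader load helper: copy the given firmware component to
 * PSP private memory, hand its address to the bootloader, issue the
 * requested bootloader command and wait for the bootloader to become
 * ready again.
 */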
static int psp_v13_0_4_bootloader_load_component(struct psp_context *psp,
struct psp_bin_desc *bin_desc,
enum psp_bootloader_cmd bl_cmd)
{
int ret;
uint32_t psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
if (psp_v13_0_4_is_sos_alive(psp))
return 0;
ret = psp_v13_0_4_wait_for_bootloader(psp);
if (ret)
return ret;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	/* Copy the firmware component binary to PSP memory */
	memcpy(psp->fw_pri_buf, bin_desc->start_addr, bin_desc->size_bytes);
	/* Provide the component to the bootloader */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = bl_cmd;
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
ret = psp_v13_0_4_wait_for_bootloader(psp);
return ret;
}
static int psp_v13_0_4_bootloader_load_kdb(struct psp_context *psp)
{
return psp_v13_0_4_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_KEY_DATABASE);
}
static int psp_v13_0_4_bootloader_load_spl(struct psp_context *psp)
{
return psp_v13_0_4_bootloader_load_component(psp, &psp->kdb, PSP_BL__LOAD_TOS_SPL_TABLE);
}
static int psp_v13_0_4_bootloader_load_sysdrv(struct psp_context *psp)
{
return psp_v13_0_4_bootloader_load_component(psp, &psp->sys, PSP_BL__LOAD_SYSDRV);
}
static int psp_v13_0_4_bootloader_load_soc_drv(struct psp_context *psp)
{
return psp_v13_0_4_bootloader_load_component(psp, &psp->soc_drv, PSP_BL__LOAD_SOCDRV);
}
static int psp_v13_0_4_bootloader_load_intf_drv(struct psp_context *psp)
{
return psp_v13_0_4_bootloader_load_component(psp, &psp->intf_drv, PSP_BL__LOAD_INTFDRV);
}
static int psp_v13_0_4_bootloader_load_dbg_drv(struct psp_context *psp)
{
return psp_v13_0_4_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
}
static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
	/* Check the sOS sign-of-life register to confirm that the sys
	 * driver and sOS have already been loaded.
	 */
if (psp_v13_0_4_is_sos_alive(psp))
return 0;
ret = psp_v13_0_4_wait_for_bootloader(psp);
if (ret)
return ret;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos.start_addr, psp->sos.size_bytes);
/* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
psp_gfxdrv_command_reg);
	/* there might be a handshake issue with the hardware which needs a delay */
mdelay(20);
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
0, true);
return ret;
}
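/* Ask the PSP to destroy the GPCOM ring (SR-IOV VF) or all rings
 * (bare metal) and wait for the command to be acknowledged.
 */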
static int psp_v13_0_4_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
		/* there might be a handshake issue with the hardware which needs a delay */
mdelay(20);
/* Wait for response flag (bit 31) */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
} else {
/* Write the ring destroy command*/
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
GFX_CTRL_CMD_ID_DESTROY_RINGS);
		/* there might be a handshake issue with the hardware which needs a delay */
mdelay(20);
/* Wait for response flag (bit 31) */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
}
return ret;
}
static int psp_v13_0_4_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
unsigned int psp_ring_reg = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
ret = psp_v13_0_4_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v13_0_ring_stop_sriov failed!\n");
return ret;
}
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, psp_ring_reg);
/* Write high address of the ring to C2PMSG_103 */
psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, psp_ring_reg);
/* Write the ring initialization command to C2PMSG_101 */
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
		/* there might be a handshake issue with the hardware which needs a delay */
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_101 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
0x80000000, 0x8000FFFF, false);
} else {
/* Wait for sOS ready for ring creation */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
if (ret) {
DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
return ret;
}
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_69, psp_ring_reg);
/* Write high address of the ring to C2PMSG_70 */
psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_70, psp_ring_reg);
/* Write size of ring to C2PMSG_71 */
psp_ring_reg = ring->ring_size;
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_71, psp_ring_reg);
/* Write the ring initialization command to C2PMSG_64 */
psp_ring_reg = ring_type;
psp_ring_reg = psp_ring_reg << 16;
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, psp_ring_reg);
		/* there might be a handshake issue with the hardware which needs a delay */
mdelay(20);
/* Wait for response flag (bit 31) in C2PMSG_64 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
0x80000000, 0x8000FFFF, false);
}
return ret;
}
static int psp_v13_0_4_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
ret = psp_v13_0_4_ring_stop(psp, ring_type);
if (ret)
DRM_ERROR("Fail to stop psp ring\n");
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
return ret;
}
static uint32_t psp_v13_0_4_ring_get_wptr(struct psp_context *psp)
{
uint32_t data;
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67);
return data;
}
static void psp_v13_0_4_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, value);
}
static const struct psp_funcs psp_v13_0_4_funcs = {
.init_microcode = psp_v13_0_4_init_microcode,
.bootloader_load_kdb = psp_v13_0_4_bootloader_load_kdb,
.bootloader_load_spl = psp_v13_0_4_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v13_0_4_bootloader_load_sysdrv,
.bootloader_load_soc_drv = psp_v13_0_4_bootloader_load_soc_drv,
.bootloader_load_intf_drv = psp_v13_0_4_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_4_bootloader_load_dbg_drv,
.bootloader_load_sos = psp_v13_0_4_bootloader_load_sos,
.ring_create = psp_v13_0_4_ring_create,
.ring_stop = psp_v13_0_4_ring_stop,
.ring_destroy = psp_v13_0_4_ring_destroy,
.ring_get_wptr = psp_v13_0_4_ring_get_wptr,
.ring_set_wptr = psp_v13_0_4_ring_set_wptr,
};
void psp_v13_0_4_set_psp_funcs(struct psp_context *psp)
{
psp->funcs = &psp_v13_0_4_funcs;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "gfxhub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "gc/gc_9_0_default.h"
#include "vega10_enum.h"
#include "soc15_common.h"
static u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
}
static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev,
uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
lower_32_bits(page_table_base));
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
hub->ctx_addr_distance * vmid,
upper_32_bits(page_table_base));
}
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
uint64_t pt_base;
if (adev->gmc.pdb0_bo)
pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
else
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
	/* If GART is used for FB translation, the vmid0 page table covers
	 * both vram and system memory (gart)
*/
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.fb_start >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
(u32)(adev->gmc.fb_start >> 44));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
(u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
(u32)(adev->gmc.gart_end >> 44));
} else {
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
(u32)(adev->gmc.gart_start >> 44));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
(u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
(u32)(adev->gmc.gart_end >> 44));
}
}
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
/* Program the AGP BAR */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
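	/* Only program the system aperture on bare metal or on Vega10 and
	 * older; SR-IOV VFs on newer ASICs leave these registers to the host.
	 */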
if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
/* Program the system aperture low logical page number. */
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
			 * Raven2 has a HW issue that makes it unable to use the
			 * vram which lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
			 * The workaround is to increase the system aperture
			 * high address (by 1) to get rid of the VM fault and
			 * hardware hang.
*/
WREG32_SOC15_RLC(GC, 0,
mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max((adev->gmc.fb_end >> 18) + 0x1,
adev->gmc.agp_end >> 18));
else
WREG32_SOC15_RLC(
GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
(u32)(value >> 44));
/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
(u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
(u32)((u64)adev->dummy_page_addr >> 44));
WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}
	/* When squeezing vram into the GART aperture, we don't use the
	 * FB aperture and AGP aperture. Disable them.
*/
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
}
}
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* Setup TLB control */
tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
}
static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL, tmp);
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL2);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
} else {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
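	/* Route page-table walker PDE/PTE fetches as physical requests when
	 * the GPU is directly connected to the CPU over xGMI.
	 */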
if (adev->gmc.xgmi.connected_to_cpu) {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
} else {
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
}
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp);
}
static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
{
uint32_t tmp;
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
adev->gmc.vmid0_page_table_depth);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
adev->gmc.vmid0_page_table_block_size);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
}
static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0XFFFFFFFF);
WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
0x0000000F);
WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
0);
WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
0);
WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
WREG32_SOC15(GC, 0, mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}
static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
unsigned int num_level, block_size;
uint32_t tmp;
int i;
num_level = adev->vm_manager.num_level;
block_size = adev->vm_manager.block_size;
if (adev->gmc.translate_further)
num_level -= 1;
else
block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
num_level);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm.
* On Aldebaran, XNACK can be enabled in the SQ per-process.
* Retry faults need to be enabled for that to work.
*/
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!adev->gmc.noretry ||
adev->asic_type == CHIP_ALDEBARAN);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
i * hub->ctx_addr_distance,
lower_32_bits(adev->vm_manager.max_pfn - 1));
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
}
static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
unsigned int i;
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
i * hub->eng_addr_distance, 0xffffffff);
WREG32_SOC15_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
i * hub->eng_addr_distance, 0x1f);
}
}
static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
{
/* GART Enable. */
gfxhub_v1_0_init_gart_aperture_regs(adev);
gfxhub_v1_0_init_system_aperture_regs(adev);
gfxhub_v1_0_init_tlb_regs(adev);
if (!amdgpu_sriov_vf(adev))
gfxhub_v1_0_init_cache_regs(adev);
gfxhub_v1_0_enable_system_domain(adev);
if (!amdgpu_sriov_vf(adev))
gfxhub_v1_0_disable_identity_aperture(adev);
gfxhub_v1_0_setup_vmid_config(adev);
gfxhub_v1_0_program_invalidation(adev);
return 0;
}
static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
/* Disable all tables */
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
if (amdgpu_sriov_vf(adev))
		/* Avoid writing to GMC registers */
return;
/* Setup TLB control */
tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
tmp = REG_SET_FIELD(tmp,
MC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL,
0);
WREG32_SOC15_RLC(GC, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
WREG32_FIELD15(GC, 0, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, 0);
}
/**
* gfxhub_v1_0_set_fault_enable_default - update GART/VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
tmp = RREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp,
VM_L2_PROTECTION_FAULT_CNTL,
TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
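	/* When faults are not redirected to the default page, set the
	 * CRASH_ON_* bits so retry and no-retry faults are treated as fatal
	 * instead of being silently dropped.
	 */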
if (!value) {
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_NO_RETRY_FAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_RETRY_FAULT, 1);
}
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
static void gfxhub_v1_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
hub->vm_inv_eng0_sem =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_ACK);
hub->vm_context0_cntl =
SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL);
hub->vm_l2_pro_fault_status =
SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS);
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
hub->ctx_distance = mmVM_CONTEXT1_CNTL - mmVM_CONTEXT0_CNTL;
hub->ctx_addr_distance = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
hub->eng_distance = mmVM_INVALIDATE_ENG1_REQ - mmVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
.get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
.gart_enable = gfxhub_v1_0_gart_enable,
.gart_disable = gfxhub_v1_0_gart_disable,
.set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
.init = gfxhub_v1_0_init,
.get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0.h"
#include "mmsch_v4_0.h"
#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev);
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
/**
* jpeg_v4_0_early_init - set function pointers
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
*/
static int jpeg_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
jpeg_v4_0_set_dec_ring_funcs(adev);
jpeg_v4_0_set_irq_funcs(adev);
jpeg_v4_0_set_ras_funcs(adev);
return 0;
}
/**
* jpeg_v4_0_sw_init - sw init for JPEG block
*
* @handle: amdgpu_device pointer
*
* Load firmware and sw initialization
*/
static int jpeg_v4_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int r;
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
if (r)
return r;
/* JPEG DJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
/* JPEG EJPEG POISON EVENT */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
if (r)
return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
ring = adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
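	/* JPEG shares the VCN doorbell range: offset 4 within the range under
	 * SR-IOV, offset 1 on bare metal.
	 */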
	ring->doorbell_index = amdgpu_sriov_vf(adev) ?
		(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) :
		((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1);
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
r = amdgpu_jpeg_ras_sw_init(adev);
if (r)
return r;
return 0;
}
/**
* jpeg_v4_0_sw_fini - sw fini for JPEG block
*
* @handle: amdgpu_device pointer
*
* JPEG suspend and free up sw allocation
*/
static int jpeg_v4_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
r = amdgpu_jpeg_sw_fini(adev);
return r;
}
/**
* jpeg_v4_0_hw_init - start and test JPEG block
*
* @handle: amdgpu_device pointer
*
*/
static int jpeg_v4_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (amdgpu_sriov_vf(adev)) {
r = jpeg_v4_0_start_sriov(adev);
if (r)
return r;
ring->wptr = 0;
ring->wptr_old = 0;
jpeg_v4_0_dec_ring_set_wptr(ring);
ring->sched.ready = true;
} else {
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
VCN_JPEG_DB_CTRL__EN_MASK);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
}
DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully.\n");
return 0;
}
/**
* jpeg_v4_0_hw_fini - stop the hardware block
*
* @handle: amdgpu_device pointer
*
* Stop the JPEG block, mark ring as not ready any more
*/
static int jpeg_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (!amdgpu_sriov_vf(adev)) {
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return 0;
}
/**
* jpeg_v4_0_suspend - suspend JPEG block
*
* @handle: amdgpu_device pointer
*
* HW fini and suspend JPEG block
*/
static int jpeg_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = jpeg_v4_0_hw_fini(adev);
if (r)
return r;
r = amdgpu_jpeg_suspend(adev);
return r;
}
/**
* jpeg_v4_0_resume - resume JPEG block
*
* @handle: amdgpu_device pointer
*
* Resume firmware and hw init JPEG block
*/
static int jpeg_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
r = jpeg_v4_0_hw_init(adev);
return r;
}
static void jpeg_v4_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
} else {
data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
}
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
static void jpeg_v4_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
} else {
data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
}
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|JPEG_CGC_GATE__JMCIF_MASK
|JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
static int jpeg_v4_0_disable_static_power_gating(struct amdgpu_device *adev)
{
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
uint32_t data = 0;
int r = 0;
data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_PGFSM_CONFIG), data);
r = SOC15_WAIT_ON_RREG(JPEG, 0,
regUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
if (r) {
DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG disable power gating failed\n");
return r;
}
}
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
/* keep the JPEG in static PG mode */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
return 0;
}
static int jpeg_v4_0_enable_static_power_gating(struct amdgpu_device *adev)
{
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
uint32_t data = 0;
int r = 0;
data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_PGFSM_CONFIG), data);
r = SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_PGFSM_STATUS,
(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
if (r) {
DRM_DEV_ERROR(adev->dev, "amdgpu: JPEG enable power gating failed\n");
return r;
}
}
return 0;
}
/**
* jpeg_v4_0_start - start JPEG block
*
* @adev: amdgpu_device pointer
*
* Setup and start the JPEG block
*/
static int jpeg_v4_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
/* disable power gating */
r = jpeg_v4_0_disable_static_power_gating(adev);
if (r)
return r;
/* JPEG disable CGC */
jpeg_v4_0_disable_clock_gating(adev);
/* MJPEG global tiling registers */
WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable JMI channel */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable System Interrupt for JRBC */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
JPEG_SYS_INT_EN__DJRBC_MASK,
~JPEG_SYS_INT_EN__DJRBC_MASK);
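	/* Program the JRBC ring buffer: vmid 0, ring base address, rptr/wptr
	 * reset and the ring size in dwords.
	 */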
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
return 0;
}
static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint64_t ctx_addr;
uint32_t param, resp, expected;
uint32_t tmp, timeout;
struct amdgpu_mm_table *table = &adev->virt.mm_table;
uint32_t *table_loc;
uint32_t table_size;
uint32_t size, size_dw;
uint32_t init_status;
struct mmsch_v4_0_cmd_direct_write
direct_wt = { {0} };
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_init_header header;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
end.cmd_header.command_type =
MMSCH_COMMAND__END;
header.version = MMSCH_VERSION;
header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);
header.jpegdec.init_status = 0;
header.jpegdec.table_offset = 0;
header.jpegdec.table_size = 0;
table_loc = (uint32_t *)table->cpu_addr;
table_loc += header.total_size;
table_size = 0;
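	/* Build the MMSCH init table for JPEG: a list of direct register
	 * writes that the MMSCH applies on behalf of this VF.
	 */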
ring = adev->jpeg.inst->ring_dec;
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
regUVD_LMI_JRBC_RB_64BIT_BAR_LOW),
lower_32_bits(ring->gpu_addr));
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH),
upper_32_bits(ring->gpu_addr));
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(JPEG, 0,
regUVD_JRBC_RB_SIZE), ring->ring_size / 4);
/* add end packet */
MMSCH_V4_0_INSERT_END();
/* refine header */
header.jpegdec.init_status = 0;
header.jpegdec.table_offset = header.total_size;
header.jpegdec.table_size = table_size;
header.total_size += table_size;
/* Update init table header in memory */
size = sizeof(struct mmsch_v4_0_init_header);
table_loc = (uint32_t *)table->cpu_addr;
memcpy((void *)table_loc, &header, size);
/* message MMSCH (in VCN[0]) to initialize this client
* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
* of memory descriptor location
*/
ctx_addr = table->gpu_addr;
WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
/* 2, update vmid of descriptor */
tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
/* use domain0 for MM scheduler */
tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);
/* 3, notify mmsch about the size of this descriptor */
size = header.total_size;
WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);
/* 5, kick off the initialization and wait until
* MMSCH_VF_MAILBOX_RESP becomes non-zero
*/
param = 0x00000001;
WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
tmp = 0;
timeout = 1000;
resp = 0;
expected = MMSCH_VF_MAILBOX_RESP__OK;
init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->jpegdec.init_status;
while (resp != expected) {
resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
if (resp != 0)
break;
udelay(10);
tmp = tmp + 10;
if (tmp >= timeout) {
DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
" waiting for regMMSCH_VF_MAILBOX_RESP "\
"(expected=0x%08x, readback=0x%08x)\n",
tmp, expected, resp);
return -EBUSY;
}
}
if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init status for jpeg: %x\n", resp, init_status);
return 0;
}
/**
* jpeg_v4_0_stop - stop JPEG block
*
* @adev: amdgpu_device pointer
*
* stop the JPEG block
*/
static int jpeg_v4_0_stop(struct amdgpu_device *adev)
{
int r;
/* reset JMI */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
UVD_JMI_CNTL__SOFT_RESET_MASK,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
jpeg_v4_0_enable_clock_gating(adev);
/* enable power gating */
r = jpeg_v4_0_enable_static_power_gating(adev);
if (r)
return r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
return 0;
}
/**
* jpeg_v4_0_dec_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t jpeg_v4_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
}
/**
* jpeg_v4_0_dec_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t jpeg_v4_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
}
/**
* jpeg_v4_0_dec_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void jpeg_v4_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
static bool jpeg_v4_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
return ret;
}
static int jpeg_v4_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
static int jpeg_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v4_0_is_idle(handle))
return -EBUSY;
jpeg_v4_0_enable_clock_gating(adev);
} else {
jpeg_v4_0_disable_clock_gating(adev);
}
return 0;
}
static int jpeg_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
if (amdgpu_sriov_vf(adev)) {
adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
return 0;
}
if (state == adev->jpeg.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
ret = jpeg_v4_0_stop(adev);
else
ret = jpeg_v4_0_start(adev);
if (!ret)
adev->jpeg.cur_state = state;
return ret;
}
static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: JPEG TRAP\n");
switch (entry->src_id) {
case VCN_4_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = {
.name = "jpeg_v4_0",
.early_init = jpeg_v4_0_early_init,
.late_init = NULL,
.sw_init = jpeg_v4_0_sw_init,
.sw_fini = jpeg_v4_0_sw_fini,
.hw_init = jpeg_v4_0_hw_init,
.hw_fini = jpeg_v4_0_hw_fini,
.suspend = jpeg_v4_0_suspend,
.resume = jpeg_v4_0_resume,
.is_idle = jpeg_v4_0_is_idle,
.wait_for_idle = jpeg_v4_0_wait_for_idle,
.check_soft_reset = NULL,
.pre_soft_reset = NULL,
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v4_0_set_clockgating_state,
.set_powergating_state = jpeg_v4_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* jpeg_v4_0_dec_ring_emit_vm_flush */
18 + 18 + /* jpeg_v4_0_dec_ring_emit_fence x2 vm fence */
8 + 16,
.emit_ib_size = 22, /* jpeg_v4_0_dec_ring_emit_ib */
.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v2_0_dec_ring_nop,
.insert_start = jpeg_v2_0_dec_ring_insert_start,
.insert_end = jpeg_v2_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_jpeg_ring_begin_use,
.end_use = amdgpu_jpeg_ring_end_use,
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v4_0_dec_ring_vm_funcs;
DRM_DEV_INFO(adev->dev, "JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
.set = jpeg_v4_0_set_interrupt_state,
.process = jpeg_v4_0_process_interrupt,
};
static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
.set = jpeg_v4_0_set_ras_interrupt_state,
.process = amdgpu_jpeg_process_poison_irq,
};
static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;
adev->jpeg.inst->ras_poison_irq.num_types = 1;
adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 4,
.minor = 0,
.rev = 0,
.funcs = &jpeg_v4_0_ip_funcs,
};
static uint32_t jpeg_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_JPEG_V4_0_JPEG0:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG0_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
break;
case AMDGPU_JPEG_V4_0_JPEG1:
reg_value = RREG32_SOC15(JPEG, instance, regUVD_RAS_JPEG1_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool jpeg_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
uint32_t inst = 0, sub = 0, poison_stat = 0;
for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
for (sub = 0; sub < AMDGPU_JPEG_V4_0_MAX_SUB_BLOCK; sub++)
poison_stat +=
jpeg_v4_0_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
.query_poison_status = jpeg_v4_0_query_ras_poison_status,
};
static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
.ras_block = {
.hw_ops = &jpeg_v4_0_ras_hw_ops,
.ras_late_init = amdgpu_jpeg_ras_late_init,
},
};
static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[JPEG_HWIP][0]) {
case IP_VERSION(4, 0, 0):
adev->jpeg.ras = &jpeg_v4_0_ras;
break;
default:
break;
}
}
| linux-master | drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v2_3.h"
#include "nbio/nbio_2_3_default.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
#include <linux/pci.h>
#define smnPCIE_CONFIG_CNTL 0x11180044
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_LC_CNTL 0x11140280
#define smnPCIE_LC_CNTL3 0x111402d4
#define smnPCIE_LC_CNTL6 0x111402ec
#define smnPCIE_LC_CNTL7 0x111402f0
#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
#define mmBIF_SDMA2_DOORBELL_RANGE 0x01d6
#define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX 2
#define mmBIF_SDMA3_DOORBELL_RANGE 0x01d7
#define mmBIF_SDMA3_DOORBELL_RANGE_BASE_IDX 2
#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01d8
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
/*
* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
* therefore we force rev_id to 0 (which is the default value)
*/
if (amdgpu_sriov_vf(adev)) {
return 0;
}
tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
return tmp;
}
static void nbio_v2_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
BIF_FB_EN__FB_READ_EN_MASK |
BIF_FB_EN__FB_WRITE_EN_MASK);
else
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}
static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index,
int doorbell_size)
{
u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
instance == 1 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE) :
instance == 2 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA2_DOORBELL_RANGE) :
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA3_DOORBELL_RANGE);
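	/* All BIF_SDMAx_DOORBELL_RANGE registers share the SDMA0 field layout,
	 * so the SDMA0 REG_SET_FIELD definitions are reused for every instance.
	 */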
u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, OFFSET,
doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE,
doorbell_size);
} else
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_SDMA0_DOORBELL_RANGE, SIZE,
0);
WREG32(reg, doorbell_range);
}
static void nbio_v2_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
int doorbell_index, int instance)
{
u32 reg = instance ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE) :
SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
} else
doorbell_range = REG_SET_FIELD(doorbell_range,
BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);
WREG32(reg, doorbell_range);
}
static void nbio_v2_3_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN,
enable ? 1 : 0);
}
static void nbio_v2_3_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = 0;
if (enable) {
tmp = REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_EN, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_MODE, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_SIZE, 0);
WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
lower_32_bits(adev->doorbell.base));
WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
upper_32_bits(adev->doorbell.base));
}
WREG32_SOC15(NBIO, 0, mmBIF_BX_PF_DOORBELL_SELFRING_GPA_APER_CNTL,
tmp);
}
static void nbio_v2_3_ih_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
BIF_IH_DOORBELL_RANGE, OFFSET,
doorbell_index);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
BIF_IH_DOORBELL_RANGE, SIZE,
2);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
BIF_IH_DOORBELL_RANGE, SIZE,
0);
WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
static void nbio_v2_3_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
/* setup interrupt control */
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
/*
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
IH_DUMMY_RD_OVERRIDE, 0);
/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL,
IH_REQ_NONSNOOP_EN, 0);
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
static void nbio_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
return;
def = data = RREG32_PCIE(smnCPM_CONTROL);
if (enable) {
data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
} else {
data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
}
if (def != data)
WREG32_PCIE(smnCPM_CONTROL, data);
}
static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (!(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
return;
def = data = RREG32_PCIE(smnPCIE_CNTL2);
if (enable) {
data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
} else {
data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
}
if (def != data)
WREG32_PCIE(smnPCIE_CNTL2, data);
}
static void nbio_v2_3_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
int data;
/* AMD_CG_SUPPORT_BIF_MGCG */
data = RREG32_PCIE(smnCPM_CONTROL);
if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_BIF_MGCG;
/* AMD_CG_SUPPORT_BIF_LS */
data = RREG32_PCIE(smnPCIE_CNTL2);
if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
static u32 nbio_v2_3_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_REQ);
}
static u32 nbio_v2_3_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_GPU_HDP_FLUSH_DONE);
}
static u32 nbio_v2_3_get_pcie_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}
static u32 nbio_v2_3_get_pcie_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}
const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
.ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
.ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
.ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
.ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
.ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
.ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
.ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
#define NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT 0x0000000A // 1=1us, 9=1ms, 10=4ms
#define NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT 0x0000000E // 400ms
static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
if (enable) {
/* Disable ASPM L0s/L1 first */
data &= ~(PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK | PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK);
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
} else {
		/* Disable ASPM L1 */
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
/* Disable ASPM TxL0s */
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
/* Disable ACPI L1 */
data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
}
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
#ifdef CONFIG_PCIEASPM
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2);
data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data);
def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
if (def != data)
WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
#endif
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL7, data);
def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
if (def != data)
WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
	/* Don't bother with LTR if LTR is not enabled
	 * in the path */
if (adev->pdev->ltr_path)
nbio_v2_3_program_ltr(adev);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
if (def != data)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5);
data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
if (def != data)
WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
if (pci_is_thunderbolt_attached(adev->pdev))
data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
else
data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL, data);
def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
#endif
}
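/*
 * Illustrative sketch, not part of the upstream driver: the function above
 * repeatedly uses a "def/data" read-modify-write pattern so a register is
 * only written back when its value actually changes.  A hypothetical helper
 * capturing the same idea could look roughly like this:
 */
#if 0
static void nbio_v2_3_rmw_pcie(struct amdgpu_device *adev, u32 reg,
			       u32 clear_mask, u32 set_mask)
{
	u32 def, data;

	def = data = RREG32_PCIE(reg);
	data &= ~clear_mask;
	data |= set_mask;
	if (def != data)
		WREG32_PCIE(reg, data);
}
#endif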
static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
{
uint32_t reg_data = 0;
uint32_t link_width = 0;
if (!((adev->asic_type >= CHIP_NAVI10) &&
(adev->asic_type <= CHIP_NAVI12)))
return;
reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
link_width = (reg_data & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
/*
* Program PCIE_LC_CNTL6.LC_SPC_MODE_8GT to 0x2 (4 symbols per clock data)
* if link_width is 0x3 (x4)
*/
if (0x3 == link_width) {
reg_data = RREG32_PCIE(smnPCIE_LC_CNTL6);
reg_data &= ~PCIE_LC_CNTL6__LC_SPC_MODE_8GT_MASK;
reg_data |= (0x2 << PCIE_LC_CNTL6__LC_SPC_MODE_8GT__SHIFT);
WREG32_PCIE(smnPCIE_LC_CNTL6, reg_data);
}
}
static void nbio_v2_3_apply_l1_link_width_reconfig_wa(struct amdgpu_device *adev)
{
uint32_t reg_data = 0;
if (adev->asic_type != CHIP_NAVI10)
return;
reg_data = RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL);
reg_data |= PCIE_LC_LINK_WIDTH_CNTL__LC_L1_RECONFIG_EN_MASK;
WREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL, reg_data);
}
static void nbio_v2_3_clear_doorbell_interrupt(struct amdgpu_device *adev)
{
uint32_t reg, reg_data;
if (adev->ip_versions[NBIO_HWIP][0] != IP_VERSION(3, 3, 0))
return;
reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL);
	/* Clear Interrupt Status */
if ((reg & BIF_RB_CNTL__RB_ENABLE_MASK) == 0) {
reg = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (reg & BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_STATUS_MASK) {
reg_data = 1 << BIF_DOORBELL_INT_CNTL__DOORBELL_INTERRUPT_CLEAR__SHIFT;
WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, reg_data);
}
}
}
const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset,
.get_pcie_data_offset = nbio_v2_3_get_pcie_data_offset,
.get_rev_id = nbio_v2_3_get_rev_id,
.mc_access_enable = nbio_v2_3_mc_access_enable,
.get_memsize = nbio_v2_3_get_memsize,
.sdma_doorbell_range = nbio_v2_3_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v2_3_vcn_doorbell_range,
.enable_doorbell_aperture = nbio_v2_3_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v2_3_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v2_3_ih_doorbell_range,
.update_medium_grain_clock_gating = nbio_v2_3_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v2_3_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v2_3_get_clockgating_state,
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
.enable_aspm = nbio_v2_3_enable_aspm,
.program_aspm = nbio_v2_3_program_aspm,
.apply_lc_spc_mode_wa = nbio_v2_3_apply_lc_spc_mode_wa,
.apply_l1_link_width_reconfig_wa = nbio_v2_3_apply_l1_link_width_reconfig_wa,
.clear_doorbell_interrupt = nbio_v2_3_clear_doorbell_interrupt,
};
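/*
 * Usage sketch (assumption, for illustration only): common amdgpu code does
 * not call the helpers above directly; it selects this table during early
 * init and then dispatches through it, e.g. roughly:
 *
 *	adev->nbio.funcs = &nbio_v2_3_funcs;
 *	...
 *	adev->nbio.funcs->program_aspm(adev);
 */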
| linux-master | drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "gfxhub_v3_0.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "navi10_enum.h"
#include "soc15_common.h"
static const char * const gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
"GE2",
"CPF",
"CPC",
"CPG",
"RLC",
"TCP",
"SQC (inst)",
"SQC (data)",
"SQG",
"Reserved",
"SDMA0",
"SDMA1",
"GCR",
"SDMA2",
"SDMA3",
};
static uint32_t gfxhub_v3_0_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
u32 req = 0;
/* invalidate using legacy mode on vmid*/
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vmid);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
return req;
}
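/*
 * Illustrative sketch (simplified, not the actual gmc flush path): the
 * request word built above is assumed to be written to one of the
 * GCVM_INVALIDATE_ENGn_REQ registers, with completion polled on the
 * matching ACK register.  'eng' and 'vmid' are hypothetical parameters.
 */
#if 0
static void gfxhub_v3_0_example_flush(struct amdgpu_device *adev,
				      unsigned int eng, unsigned int vmid)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	u32 req = gfxhub_v3_0_get_invalidate_req(vmid, 0);

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, req);
	while (!(RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
			       hub->eng_distance * eng) & (1 << vmid)))
		cpu_relax();
}
#endif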
static void
gfxhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
uint32_t status)
{
u32 cid = REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, CID);
dev_err(adev->dev,
"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
dev_err(adev->dev, "\t RW: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, RW));
}
static u64 gfxhub_v3_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE);
base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
base <<= 24;
return base;
}
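/*
 * Note (illustrative): GCMC_VM_FB_LOCATION_BASE holds the frame buffer base
 * in 16 MB units, hence the << 24 above; e.g. a register field value of
 * 0x10 corresponds to an FB base of 0x10000000 (256 MB).
 */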
static u64 gfxhub_v3_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
return (u64)RREG32_SOC15(GC, 0, regGCMC_VM_FB_OFFSET) << 24;
}
static void gfxhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
lower_32_bits(page_table_base));
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
hub->ctx_addr_distance * vmid,
upper_32_bits(page_table_base));
}
static void gfxhub_v3_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
gfxhub_v3_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
(u32)(adev->gmc.gart_start >> 44));
WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
(u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
(u32)(adev->gmc.gart_end >> 44));
}
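/*
 * Worked example (illustrative): the 48-bit GART start/end addresses are
 * split across LO32/HI32 registers in units of 4K pages.  For
 * gart_start = 0x0000008000000000 this gives
 *	LO32 = (u32)(gart_start >> 12) = 0x08000000
 *	HI32 = (u32)(gart_start >> 44) = 0x00000000
 */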
static void gfxhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
/* Program the AGP BAR */
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
(u32)(value >> 44));
/* Program "protection fault". */
WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
(u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
(u32)((u64)adev->dummy_page_addr >> 44));
WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}
static void gfxhub_v3_0_init_tlb_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* Setup TLB control */
tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
}
static void gfxhub_v3_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, regGCVM_L2_CNTL, tmp);
tmp = RREG32_SOC15(GC, 0, regGCVM_L2_CNTL2);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(GC, 0, regGCVM_L2_CNTL2, tmp);
tmp = regGCVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
} else {
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, tmp);
tmp = regGCVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
WREG32_SOC15(GC, 0, regGCVM_L2_CNTL4, tmp);
tmp = regGCVM_L2_CNTL5_DEFAULT;
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, regGCVM_L2_CNTL5, tmp);
}
static void gfxhub_v3_0_enable_system_domain(struct amdgpu_device *adev)
{
uint32_t tmp;
tmp = RREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, regGCVM_CONTEXT0_CNTL, tmp);
}
static void gfxhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev)
{
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
0x0000000F);
WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
0);
WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
0);
WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
WREG32_SOC15(GC, 0, regGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}
static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
int i;
uint32_t tmp;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL, i);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
adev->vm_manager.num_level);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!amdgpu_noretry);
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
i * hub->ctx_addr_distance,
lower_32_bits(adev->vm_manager.max_pfn - 1));
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
hub->vm_cntx_cntl = tmp;
}
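/*
 * Worked example (illustrative): PAGE_TABLE_BLOCK_SIZE is programmed as
 * block_size - 9, i.e. in units of 512 4K pages.  If
 * adev->vm_manager.block_size were 9 (a 2 MB block of 512 PTEs), the field
 * written above would be 0.
 */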
static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
unsigned int i;
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
i * hub->eng_addr_distance, 0xffffffff);
WREG32_SOC15_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
i * hub->eng_addr_distance, 0x1f);
}
}
static int gfxhub_v3_0_gart_enable(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
		/*
		 * GCMC_VM_FB_LOCATION_BASE/TOP read as zero for a VF because
		 * they are VF copy registers that the VBIOS post does not
		 * program, so the SR-IOV driver needs to program them here.
		 */
WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_BASE,
adev->gmc.vram_start >> 24);
WREG32_SOC15(GC, 0, regGCMC_VM_FB_LOCATION_TOP,
adev->gmc.vram_end >> 24);
}
/* GART Enable. */
gfxhub_v3_0_init_gart_aperture_regs(adev);
gfxhub_v3_0_init_system_aperture_regs(adev);
gfxhub_v3_0_init_tlb_regs(adev);
gfxhub_v3_0_init_cache_regs(adev);
gfxhub_v3_0_enable_system_domain(adev);
gfxhub_v3_0_disable_identity_aperture(adev);
gfxhub_v3_0_setup_vmid_config(adev);
gfxhub_v3_0_program_invalidation(adev);
return 0;
}
static void gfxhub_v3_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
/* Disable all tables */
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
/* Setup TLB control */
tmp = RREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15(GC, 0, regGCMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
WREG32_FIELD15_PREREG(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32_SOC15(GC, 0, regGCVM_L2_CNTL3, 0);
}
/**
* gfxhub_v3_0_set_fault_enable_default - update GART/VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
static void gfxhub_v3_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
	/* Do not halt the CP when a page fault occurs */
tmp = RREG32_SOC15(GC, 0, regCP_DEBUG);
tmp = REG_SET_FIELD(tmp, CP_DEBUG, CPG_UTCL1_ERROR_HALT_DISABLE, 1);
WREG32_SOC15(GC, 0, regCP_DEBUG, tmp);
/* These registers are not accessible to VF-SRIOV.
* The PF will program them instead.
*/
if (amdgpu_sriov_vf(adev))
return;
tmp = RREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
if (!value) {
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_NO_RETRY_FAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_RETRY_FAULT, 1);
}
WREG32_SOC15(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
static const struct amdgpu_vmhub_funcs gfxhub_v3_0_vmhub_funcs = {
.print_l2_protection_fault_status = gfxhub_v3_0_print_l2_protection_fault_status,
.get_invalidate_req = gfxhub_v3_0_get_invalidate_req,
};
static void gfxhub_v3_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
hub->vm_inv_eng0_sem =
SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
SOC15_REG_OFFSET(GC, 0, regGCVM_INVALIDATE_ENG0_ACK);
hub->vm_context0_cntl =
SOC15_REG_OFFSET(GC, 0, regGCVM_CONTEXT0_CNTL);
hub->vm_l2_pro_fault_status =
SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_STATUS);
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(GC, 0, regGCVM_L2_PROTECTION_FAULT_CNTL);
hub->ctx_distance = regGCVM_CONTEXT1_CNTL - regGCVM_CONTEXT0_CNTL;
hub->ctx_addr_distance = regGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
hub->eng_distance = regGCVM_INVALIDATE_ENG1_REQ -
regGCVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = regGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
regGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
hub->vmhub_funcs = &gfxhub_v3_0_vmhub_funcs;
}
const struct amdgpu_gfxhub_funcs gfxhub_v3_0_funcs = {
.get_fb_location = gfxhub_v3_0_get_fb_location,
.get_mc_fb_offset = gfxhub_v3_0_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v3_0_setup_vm_pt_regs,
.gart_enable = gfxhub_v3_0_gart_enable,
.gart_disable = gfxhub_v3_0_gart_disable,
.set_fault_enable_default = gfxhub_v3_0_set_fault_enable_default,
.init = gfxhub_v3_0_init,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v5_0.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
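/*
 * Note on the >> 2 above (illustrative): rmmio_remap.reg_offset and
 * KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL are byte offsets into the remapped MMIO
 * page, while WREG32_NO_KIQ()/amdgpu_ring_emit_wreg() take a dword register
 * index, hence the division by four.
 */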
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
}
static void hdp_v5_0_update_mem_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t hdp_clk_cntl, hdp_clk_cntl1;
uint32_t hdp_mem_pwr_cntl;
if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_DS |
AMD_CG_SUPPORT_HDP_SD)))
return;
hdp_clk_cntl = hdp_clk_cntl1 = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
	/* Before doing the clock/power mode switch,
	 * force the IPH & RC clocks on */
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
IPH_MEM_CLK_SOFT_OVERRIDE, 1);
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
RC_MEM_CLK_SOFT_OVERRIDE, 1);
WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
	/* HDP 5.0 doesn't support dynamic power mode switching,
	 * so disable clock and power gating before making any changes */
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_CTRL_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_LS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_DS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_SD_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_CTRL_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_LS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_DS_EN, 0);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_SD_EN, 0);
WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
/* Already disabled above. The actions below are for "enabled" only */
if (enable) {
/* only one clock gating mode (LS/DS/SD) can be enabled */
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_LS_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_LS_EN, 1);
} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_DS_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_DS_EN, 1);
} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_SD_EN, 1);
/* RC should not use shut down mode, fallback to ds or ls if allowed */
if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS)
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_DS_EN, 1);
else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
HDP_MEM_POWER_CTRL,
RC_MEM_POWER_LS_EN, 1);
}
/* confirmed that IPH_MEM_POWER_CTRL_EN and RC_MEM_POWER_CTRL_EN have to
* be set for SRAM LS/DS/SD */
if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
AMD_CG_SUPPORT_HDP_SD)) {
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
IPH_MEM_POWER_CTRL_EN, 1);
hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
RC_MEM_POWER_CTRL_EN, 1);
WREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
}
}
/* disable IPH & RC clock override after clock/power mode changing */
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
IPH_MEM_CLK_SOFT_OVERRIDE, 0);
hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
RC_MEM_CLK_SOFT_OVERRIDE, 0);
WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
static void hdp_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t hdp_clk_cntl;
if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
return;
hdp_clk_cntl = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
if (enable) {
hdp_clk_cntl &=
~(uint32_t)
(HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
} else {
hdp_clk_cntl |= HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
}
WREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL, hdp_clk_cntl);
}
static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
hdp_v5_0_update_mem_power_gating(adev, enable);
hdp_v5_0_update_medium_grain_clock_gating(adev, enable);
}
static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
uint32_t tmp;
/* AMD_CG_SUPPORT_HDP_MGCG */
tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL);
if (!(tmp & (HDP_CLK_CNTL__IPH_MEM_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
*flags |= AMD_CG_SUPPORT_HDP_MGCG;
/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
tmp = RREG32_SOC15(HDP, 0, mmHDP_MEM_POWER_CTRL);
if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_LS;
else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_DS_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_DS;
else if (tmp & HDP_MEM_POWER_CTRL__IPH_MEM_POWER_SD_EN_MASK)
*flags |= AMD_CG_SUPPORT_HDP_SD;
}
static void hdp_v5_0_init_registers(struct amdgpu_device *adev)
{
u32 tmp;
tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
}
const struct amdgpu_hdp_funcs hdp_v5_0_funcs = {
.flush_hdp = hdp_v5_0_flush_hdp,
.invalidate_hdp = hdp_v5_0_invalidate_hdp,
.update_clock_gating = hdp_v5_0_update_clock_gating,
.get_clock_gating_state = hdp_v5_0_get_clockgating_state,
.init_registers = hdp_v5_0_init_registers,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König, Felix Kuehling
*/
#include "amdgpu.h"
/**
* DOC: mem_info_preempt_used
*
 * The amdgpu driver provides a sysfs API for reporting the current total
 * amount of used preemptible memory.
 * The file mem_info_preempt_used is used for this, and returns the current
 * used size of the preemptible block, in bytes.
*/
static ssize_t mem_info_preempt_used_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}
static DEVICE_ATTR_RO(mem_info_preempt_used);
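/*
 * Usage sketch (illustrative; exact card index and value are assumptions):
 * the attribute is created on the PCI device, so from userspace it can be
 * read as, e.g.
 *
 *	$ cat /sys/class/drm/card0/device/mem_info_preempt_used
 *	4096
 */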
/**
* amdgpu_preempt_mgr_new - allocate a new node
*
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
* @res: TTM memory object
*
* Dummy, just count the space used without allocating resources or any limit.
*/
static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_resource **res)
{
*res = kzalloc(sizeof(**res), GFP_KERNEL);
if (!*res)
return -ENOMEM;
ttm_resource_init(tbo, place, *res);
(*res)->start = AMDGPU_BO_INVALID_OFFSET;
return 0;
}
/**
* amdgpu_preempt_mgr_del - free ranges
*
* @man: TTM memory type manager
* @res: TTM memory object
*
 * Free the resource allocated by amdgpu_preempt_mgr_new() again.
*/
static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
ttm_resource_fini(man, res);
kfree(res);
}
static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
.alloc = amdgpu_preempt_mgr_new,
.free = amdgpu_preempt_mgr_del,
};
/**
 * amdgpu_preempt_mgr_init - init the PREEMPT memory manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the preempt manager and create its sysfs attribute.
*/
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
{
struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
int ret;
man->use_tt = true;
man->func = &amdgpu_preempt_mgr_func;
ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30));
ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
if (ret) {
DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
return ret;
}
ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, man);
ttm_resource_manager_set_used(man, true);
return 0;
}
/**
 * amdgpu_preempt_mgr_fini - free and destroy the PREEMPT memory manager
 *
 * @adev: amdgpu_device pointer
 *
 * Destroy and free the preempt manager. If ranges are still allocated, the
 * eviction fails and the manager is left in place.
*/
void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
{
struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
int ret;
ttm_resource_manager_set_used(man, false);
ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;
device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
#include "soc15_common.h"
#include "soc15.h"
#include "navi10_sdma_pkt_open.h"
#include "nbio_v2_3.h"
#include "sdma_common.h"
#include "sdma_v5_0.h"
MODULE_FIRMWARE("amdgpu/navi10_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/navi14_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma.bin");
MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_sdma1.bin");
#define SDMA1_REG_OFFSET 0x600
#define SDMA0_HYP_DEC_REG_START 0x5880
#define SDMA0_HYP_DEC_REG_END 0x5893
#define SDMA1_HYP_DEC_REG_OFFSET 0x20
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x00ffffff, 0x000c5c00),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
};
static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
};
static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
static const struct soc15_reg_golden golden_settings_sdma_cyan_skillfish[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_UTCL1_PAGE, 0x007fffff, 0x004c5c00),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_CHICKEN_BITS, 0xffbf1f0f, 0x03ab0107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x007fffff, 0x004c5c00)
};
static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
{
u32 base;
if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
internal_offset <= SDMA0_HYP_DEC_REG_END) {
base = adev->reg_offset[GC_HWIP][0][1];
if (instance == 1)
internal_offset += SDMA1_HYP_DEC_REG_OFFSET;
} else {
base = adev->reg_offset[GC_HWIP][0][0];
if (instance == 1)
internal_offset += SDMA1_REG_OFFSET;
}
return base + internal_offset;
}
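/*
 * Worked example (illustrative): for instance 1 and a non-HYP register such
 * as mmSDMA0_GFX_RB_CNTL, the function above returns
 *	adev->reg_offset[GC_HWIP][0][0] + mmSDMA0_GFX_RB_CNTL + SDMA1_REG_OFFSET
 * i.e. the SDMA1 copy of each register sits 0x600 dwords above the SDMA0
 * copy, while the HYP_DEC registers only differ by 0x20.
 */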
static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(5, 0, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_5,
(const u32)ARRAY_SIZE(golden_settings_sdma_5));
soc15_program_register_sequence(adev,
golden_settings_sdma_nv10,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
break;
case IP_VERSION(5, 0, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_5,
(const u32)ARRAY_SIZE(golden_settings_sdma_5));
soc15_program_register_sequence(adev,
golden_settings_sdma_nv14,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
break;
case IP_VERSION(5, 0, 5):
if (amdgpu_sriov_vf(adev))
soc15_program_register_sequence(adev,
golden_settings_sdma_5_sriov,
(const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
else
soc15_program_register_sequence(adev,
golden_settings_sdma_5,
(const u32)ARRAY_SIZE(golden_settings_sdma_5));
soc15_program_register_sequence(adev,
golden_settings_sdma_nv12,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
break;
case IP_VERSION(5, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
break;
default:
break;
}
}
/**
* sdma_v5_0_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
// Emulation only, won't work on a real chip.
// Navi10 real chips need to use PSP to load the firmware.
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
int ret, i;
for (i = 0; i < adev->sdma.num_instances; i++) {
ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
	return 0;
}
static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, 1);
ret = ring->wptr & ring->buf_mask;/* this is the offset we need patch later */
amdgpu_ring_write(ring, 0x55aa55aa);/* insert dummy here and patch it later */
return ret;
}
static void sdma_v5_0_ring_patch_cond_exec(struct amdgpu_ring *ring,
unsigned offset)
{
unsigned cur;
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
cur = (ring->wptr - 1) & ring->buf_mask;
if (cur > offset)
ring->ring[offset] = cur - offset;
else
ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
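/*
 * Illustrative pairing of the two helpers above (simplified sketch, not the
 * actual submission path): the COND_EXE packet is emitted with a dummy
 * dword count, the conditional commands follow, and the dummy is then
 * patched to cover exactly those commands.
 */
#if 0
	unsigned int offset = sdma_v5_0_ring_init_cond_exec(ring);

	/* ... emit the conditionally executed packets here ... */

	sdma_v5_0_ring_patch_cond_exec(ring, offset);
#endif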
/**
* sdma_v5_0_ring_get_rptr - get the current read pointer
*
* @ring: amdgpu ring pointer
*
* Get the current rptr from the hardware (NAVI10+).
*/
static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
u64 *rptr;
/* XXX check if swapping is necessary on BE */
rptr = (u64 *)ring->rptr_cpu_addr;
DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
return ((*rptr) >> 2);
}
/**
* sdma_v5_0_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware (NAVI10+).
*/
static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u64 wptr;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
wptr = wptr << 32;
wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
}
return wptr >> 2;
}
/**
* sdma_v5_0_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware (NAVI10+).
*/
static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *wptr_saved;
uint32_t *is_queue_unmap;
uint64_t aggregated_db_index;
uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
DRM_DEBUG("Setting write pointer\n");
if (ring->is_mes_queue) {
wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
sizeof(uint32_t));
aggregated_db_index =
amdgpu_mes_get_aggregated_doorbell_index(adev,
AMDGPU_MES_PRIORITY_LEVEL_NORMAL);
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
*wptr_saved = ring->wptr << 2;
if (*is_queue_unmap) {
WDOORBELL64(aggregated_db_index, ring->wptr << 2);
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
if (*is_queue_unmap)
WDOORBELL64(aggregated_db_index,
ring->wptr << 2);
}
} else {
if (ring->use_doorbell) {
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
/* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr << 2);
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
ring->me,
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
ring->me, mmSDMA0_GFX_RB_WPTR),
lower_32_bits(ring->wptr << 2));
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev,
ring->me, mmSDMA0_GFX_RB_WPTR_HI),
upper_32_bits(ring->wptr << 2));
}
}
}
static void sdma_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
* sdma_v5_0_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
* @job: job to retrieve vmid from
* @ib: IB object to schedule
* @flags: unused
*
* Schedule an IB in the DMA ring (NAVI10).
*/
static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
	/* An IB packet must end on an 8 DW boundary--the next dword
* must be on a 8-dword boundary. Our IB packet below is 6
* dwords long, thus add x number of NOPs, such that, in
* modular arithmetic,
* wptr + 6 + x = 8k, k >= 0, which in C is,
* (wptr + 6 + x) % 8 = 0.
* The expression below, is a solution of x.
*/
sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
}
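/*
 * Worked example for the padding above (illustrative): if
 * lower_32_bits(ring->wptr) % 8 == 5, then x = (2 - 5) & 7 = 5 NOPs are
 * inserted, and 5 + 5 + 6 = 16 dwords, so the 6-dword INDIRECT packet ends
 * on an 8-dword boundary as required.
 */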
/**
* sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
*
* flush the IB by graphics cache rinse.
*/
static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
SDMA_GCR_GLI_INV(1);
/* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
}
/**
* sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me == 0)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
else
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
/**
* sdma_v5_0_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed (NAVI10).
*/
static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
/* zero in first two bits */
BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
/* optionally write high bits as well */
if (write64bit) {
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
/* zero in first two bits */
BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
if (flags & AMDGPU_FENCE_FLAG_INT) {
uint32_t ctx = ring->is_mes_queue ?
(ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
}
}
/**
* sdma_v5_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the gfx async dma ring buffers (NAVI10).
*/
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
}
/**
* sdma_v5_0_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the compute async dma queues (NAVI10).
*/
static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
{
/* XXX todo */
}
/**
 * sdma_v5_0_ctx_switch_enable - enable/disable the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
*
* Halt or unhalt the async dma engines context switch (NAVI10).
*/
static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl = 0, phase_quantum = 0;
int i;
if (amdgpu_sdma_phase_quantum) {
unsigned value = amdgpu_sdma_phase_quantum;
unsigned unit = 0;
while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
value = (value + 1) >> 1;
unit++;
}
if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
WARN_ONCE(1,
"clamping sdma_phase_quantum to %uK clock cycles\n",
value << unit);
}
phase_quantum =
value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
if (!amdgpu_sriov_vf(adev)) {
f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
}
if (enable && amdgpu_sdma_phase_quantum) {
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
phase_quantum);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
if (!amdgpu_sriov_vf(adev))
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
}
}
/**
* sdma_v5_0_enable - enable/disable the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
*
* Halt or unhalt the async dma engines (NAVI10).
*/
static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
if (!enable) {
sdma_v5_0_gfx_stop(adev);
sdma_v5_0_rlc_stop(adev);
}
if (amdgpu_sriov_vf(adev))
return;
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
}
}
/**
* sdma_v5_0_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the gfx DMA ring buffers and enable them (NAVI10).
* Returns 0 for success, error for failure.
*/
static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
u32 doorbell;
u32 doorbell_offset;
u32 temp;
u32 wptr_poll_cntl;
u64 wptr_gpu_addr;
int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
if (!amdgpu_sriov_vf(adev))
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
/* setup the wptr shadow polling */
wptr_gpu_addr = ring->wptr_gpu_addr;
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
lower_32_bits(wptr_gpu_addr));
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
F32_POLL_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
wptr_poll_cntl);
/* set the wb address whether it's enabled or not */
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
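/* RB_BASE takes the ring address in 256-byte units, hence the >> 8 / >> 40 split below */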
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
ring->gpu_addr >> 8);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programming wptr to a smaller value, minor_ptr_update must be set first */
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
lower_32_bits(ring->wptr << 2));
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
upper_32_bits(ring->wptr << 2));
}
doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
}
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
doorbell_offset);
adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
ring->doorbell_index, 20);
if (amdgpu_sriov_vf(adev))
sdma_v5_0_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr is programmed */
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
if (!amdgpu_sriov_vf(adev)) {
/* set utc l1 enable flag always to 1 */
temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
/* enable MCBP */
temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
/* Set up RESP_MODE to non-copy addresses */
temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
/* program default cache read and write policy */
temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
/* clean read policy and write policy bits */
temp &= 0xFF0FFF;
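/* the read policy field starts at bit 12 and the write policy field at bit 14, matching the shifts below */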
temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
}
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
}
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
sdma_v5_0_ctx_switch_enable(adev, true);
sdma_v5_0_enable(adev, true);
}
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
}
/**
* sdma_v5_0_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the compute DMA queues and enable them (NAVI10).
* Returns 0 for success, error for failure.
*/
static int sdma_v5_0_rlc_resume(struct amdgpu_device *adev)
{
return 0;
}
/**
* sdma_v5_0_load_microcode - load the sDMA ME ucode
*
* @adev: amdgpu_device pointer
*
* Loads the sDMA0/1 ucode.
* Returns 0 for success, -EINVAL if the ucode is not available.
*/
static int sdma_v5_0_load_microcode(struct amdgpu_device *adev)
{
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
int i, j;
/* halt the MEs */
sdma_v5_0_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
if (!adev->sdma.instance[i].fw)
return -EINVAL;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
fw_data = (const __le32 *)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
for (j = 0; j < fw_size; j++) {
if (amdgpu_emu_mode == 1 && j % 500 == 0)
msleep(1);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
}
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
}
return 0;
}
/**
* sdma_v5_0_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the DMA engines and enable them (NAVI10).
* Returns 0 for success, error for failure.
*/
static int sdma_v5_0_start(struct amdgpu_device *adev)
{
int r = 0;
if (amdgpu_sriov_vf(adev)) {
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);
/* set RB registers */
r = sdma_v5_0_gfx_resume(adev);
return r;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
r = sdma_v5_0_load_microcode(adev);
if (r)
return r;
}
/* unhalt the MEs */
sdma_v5_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v5_0_ctx_switch_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = sdma_v5_0_gfx_resume(adev);
if (r)
return r;
r = sdma_v5_0_rlc_resume(adev);
return r;
}
static int sdma_v5_0_mqd_init(struct amdgpu_device *adev, void *mqd,
struct amdgpu_mqd_prop *prop)
{
struct v10_sdma_mqd *m = mqd;
uint64_t wb_gpu_addr;
m->sdmax_rlcx_rb_cntl =
order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
wb_gpu_addr = prop->wptr_gpu_addr;
m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
wb_gpu_addr = prop->rptr_gpu_addr;
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, 0,
mmSDMA0_GFX_IB_CNTL));
m->sdmax_rlcx_doorbell_offset =
prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);
return 0;
}
static void sdma_v5_0_set_mqd_funcs(struct amdgpu_device *adev)
{
adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_0_mqd_init;
}
/**
* sdma_v5_0_ring_test_ring - simple async dma engine test
*
* @ring: amdgpu_ring structure holding ring information
*
* Test the DMA engine by using it to write a value
* to memory (NAVI10).
* Returns 0 for success, error for failure.
*/
static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
unsigned i;
unsigned index;
int r;
u32 tmp;
u64 gpu_addr;
volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
if (ring->is_mes_queue) {
uint32_t offset = 0;
offset = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_PADDING_OFFS);
gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
*cpu_ptr = tmp;
} else {
r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
}
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(tmp);
}
r = amdgpu_ring_alloc(ring, 20);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
amdgpu_device_wb_free(adev, index);
return r;
}
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
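/* the DW count field is zero-based, so COUNT(0) means a single dword of payload */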
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->is_mes_queue)
tmp = le32_to_cpu(*cpu_ptr);
else
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
if (amdgpu_emu_mode == 1)
msleep(1);
else
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
if (!ring->is_mes_queue)
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v5_0_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (NAVI10).
* Returns 0 on success, error on failure.
*/
static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
long r;
u32 tmp = 0;
u64 gpu_addr;
volatile uint32_t *cpu_ptr = NULL;
tmp = 0xCAFEDEAD;
memset(&ib, 0, sizeof(ib));
if (ring->is_mes_queue) {
uint32_t offset = 0;
offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
offset = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_PADDING_OFFS);
gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
*cpu_ptr = tmp;
} else {
r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
}
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ib_get(adev, NULL, 256,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
}
}
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
ib.ptr[4] = 0xDEADBEEF;
ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
if (ring->is_mes_queue)
tmp = le32_to_cpu(*cpu_ptr);
else
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
if (!ring->is_mes_queue)
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v5_0_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr to copy from
* @count: number of page entries to update
*
* Update PTEs by copying them from the GART using sDMA (NAVI10).
*/
static void sdma_v5_0_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = bytes - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
* sdma_v5_0_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
*
* Update PTEs by writing them manually using sDMA (NAVI10).
*/
static void sdma_v5_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr)
{
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw - 1;
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
}
}
/**
* sdma_v5_0_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using sDMA (NAVI10).
*/
static void sdma_v5_0_vm_set_pte_pde(struct amdgpu_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
/**
* sdma_v5_0_ring_pad_ib - pad the IB
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*
* Pad the IB with NOPs to a boundary multiple of 8.
*/
static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
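/* (-length_dw) & 7 is the number of dwords needed to round length_dw up to the next multiple of 8 */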
pad_count = (-ib->length_dw) & 0x7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
else
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
/**
* sdma_v5_0_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (NAVI10).
*/
static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
/**
* sdma_v5_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
* @vmid: vmid number to use
* @pd_addr: address of the page directory
*
* Update the page table base and flush the VM TLB
* using sDMA (NAVI10).
*/
static void sdma_v5_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
static void sdma_v5_0_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, val);
}
static void sdma_v5_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val); /* reference */
amdgpu_ring_write(ring, mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
static void sdma_v5_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
amdgpu_ring_emit_wreg(ring, reg0, ref);
/* wait for a cycle to reset vm_inv_eng*_ack */
amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
static int sdma_v5_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v5_0_set_ring_funcs(adev);
sdma_v5_0_set_buffer_funcs(adev);
sdma_v5_0_set_vm_pte_funcs(adev);
sdma_v5_0_set_irq_funcs(adev);
sdma_v5_0_set_mqd_funcs(adev);
return 0;
}
static int sdma_v5_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
SDMA0_5_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1,
SDMA1_5_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
r = sdma_v5_0_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell ? "true" : "false");
ring->doorbell_index = (i == 0) ?
(adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset
: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
(i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
return r;
}
static int sdma_v5_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
static int sdma_v5_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v5_0_init_golden_registers(adev);
r = sdma_v5_0_start(adev);
return r;
}
static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev)) {
/* disable the scheduler for SDMA */
amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
}
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);
return 0;
}
static int sdma_v5_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v5_0_hw_fini(adev);
}
static int sdma_v5_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v5_0_hw_init(adev);
}
static bool sdma_v5_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
u32 tmp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
}
return true;
}
static int sdma_v5_0_wait_for_idle(void *handle)
{
unsigned i;
u32 sdma0, sdma1;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
sdma0 = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
sdma1 = RREG32(sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
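/* both SDMA instances must report idle before the block is considered idle */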
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int sdma_v5_0_soft_reset(void *handle)
{
/* todo */
return 0;
}
static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
u32 index = 0;
u64 sdma_gfx_preempt;
amdgpu_sdma_get_index_from_ring(ring, &index);
if (index == 0)
sdma_gfx_preempt = mmSDMA0_GFX_PREEMPT;
else
sdma_gfx_preempt = mmSDMA1_GFX_PREEMPT;
/* assert preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, false);
/* emit the trailing fence */
ring->trail_seq += 1;
amdgpu_ring_alloc(ring, 10);
sdma_v5_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
ring->trail_seq, 0);
amdgpu_ring_commit(ring);
/* assert IB preemption */
WREG32(sdma_gfx_preempt, 1);
/* poll the trailing fence */
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->trail_seq ==
le32_to_cpu(*(ring->trail_fence_cpu_addr)))
break;
udelay(1);
}
if (i >= adev->usec_timeout) {
r = -EINVAL;
DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
}
/* deassert IB preemption */
WREG32(sdma_gfx_preempt, 0);
/* deassert the preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, true);
return r;
}
static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
if (!amdgpu_sriov_vf(adev)) {
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32(reg_offset, sdma_cntl);
}
return 0;
}
static int sdma_v5_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t mes_queue_id = entry->src_data[0];
DRM_DEBUG("IH: SDMA trap\n");
if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
struct amdgpu_mes_queue *queue;
mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
spin_lock(&adev->mes.queue_id_lock);
queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
if (queue) {
DRM_DEBUG("process smda queue id = %d\n", mes_queue_id);
amdgpu_fence_process(queue->ring);
}
spin_unlock(&adev->mes.queue_id_lock);
return 0;
}
switch (entry->client_id) {
case SOC15_IH_CLIENTID_SDMA0:
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[0].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
case 3:
/* XXX page queue */
break;
}
break;
case SOC15_IH_CLIENTID_SDMA1:
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[1].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
case 3:
/* XXX page queue */
break;
}
break;
}
return 0;
}
static int sdma_v5_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
return 0;
}
static void sdma_v5_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
/* Enable sdma clock gating */
def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (def != data)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
} else {
/* Disable sdma clock gating */
def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (def != data)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
}
}
}
static void sdma_v5_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
/* Enable sdma mem light sleep */
def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
} else {
/* Disable sdma mem light sleep */
def = data = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
}
}
}
static int sdma_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
sdma_v5_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v5_0_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
static int sdma_v5_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
/* AMD_CG_SUPPORT_SDMA_MGCG */
data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
/* AMD_CG_SUPPORT_SDMA_LS */
data = RREG32(sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
const struct amd_ip_funcs sdma_v5_0_ip_funcs = {
.name = "sdma_v5_0",
.early_init = sdma_v5_0_early_init,
.late_init = NULL,
.sw_init = sdma_v5_0_sw_init,
.sw_fini = sdma_v5_0_sw_fini,
.hw_init = sdma_v5_0_hw_init,
.hw_fini = sdma_v5_0_hw_fini,
.suspend = sdma_v5_0_suspend,
.resume = sdma_v5_0_resume,
.is_idle = sdma_v5_0_is_idle,
.wait_for_idle = sdma_v5_0_wait_for_idle,
.soft_reset = sdma_v5_0_soft_reset,
.set_clockgating_state = sdma_v5_0_set_clockgating_state,
.set_powergating_state = sdma_v5_0_set_powergating_state,
.get_clockgating_state = sdma_v5_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.get_rptr = sdma_v5_0_ring_get_rptr,
.get_wptr = sdma_v5_0_ring_get_wptr,
.set_wptr = sdma_v5_0_ring_set_wptr,
.emit_frame_size =
5 + /* sdma_v5_0_ring_init_cond_exec */
6 + /* sdma_v5_0_ring_emit_hdp_flush */
3 + /* hdp_invalidate */
6 + /* sdma_v5_0_ring_emit_pipeline_sync */
/* sdma_v5_0_ring_emit_vm_flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
.emit_mem_sync = sdma_v5_0_ring_emit_mem_sync,
.emit_fence = sdma_v5_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v5_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v5_0_ring_emit_hdp_flush,
.test_ring = sdma_v5_0_ring_test_ring,
.test_ib = sdma_v5_0_ring_test_ib,
.insert_nop = sdma_v5_0_ring_insert_nop,
.pad_ib = sdma_v5_0_ring_pad_ib,
.emit_wreg = sdma_v5_0_ring_emit_wreg,
.emit_reg_wait = sdma_v5_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = sdma_v5_0_ring_emit_reg_write_reg_wait,
.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
.patch_cond_exec = sdma_v5_0_ring_patch_cond_exec,
.preempt_ib = sdma_v5_0_ring_preempt_ib,
};
static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v5_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
}
}
static const struct amdgpu_irq_src_funcs sdma_v5_0_trap_irq_funcs = {
.set = sdma_v5_0_set_trap_irq_state,
.process = sdma_v5_0_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v5_0_illegal_inst_irq_funcs = {
.process = sdma_v5_0_process_illegal_inst_irq,
};
static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
adev->sdma.num_instances;
adev->sdma.trap_irq.funcs = &sdma_v5_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v5_0_illegal_inst_irq_funcs;
}
/**
* sdma_v5_0_emit_copy_buffer - copy buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
* @tmz: if a secure copy should be used
*
* Copy GPU buffers using the DMA engine (NAVI10).
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
* sdma_v5_0_emit_fill_buffer - fill buffer using the sDMA engine
*
* @ib: indirect buffer to fill
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
* Fill GPU buffers using the DMA engine (NAVI10).
*/
static void sdma_v5_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
ib->ptr[ib->length_dw++] = byte_count - 1;
}
static const struct amdgpu_buffer_funcs sdma_v5_0_buffer_funcs = {
.copy_max_bytes = 0x400000,
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v5_0_emit_copy_buffer,
.fill_max_bytes = 0x400000,
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v5_0_emit_fill_buffer,
};
static void sdma_v5_0_set_buffer_funcs(struct amdgpu_device *adev)
{
if (adev->mman.buffer_funcs == NULL) {
adev->mman.buffer_funcs = &sdma_v5_0_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
}
static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = sdma_v5_0_vm_copy_pte,
.write_pte = sdma_v5_0_vm_write_pte,
.set_pte_pde = sdma_v5_0_vm_set_pte_pde,
};
static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
if (adev->vm_manager.vm_pte_funcs == NULL) {
adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->vm_manager.vm_pte_scheds[i] =
&adev->sdma.instance[i].ring.sched;
}
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
}
const struct amdgpu_ip_block_version sdma_v5_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 5,
.minor = 0,
.rev = 0,
.funcs = &sdma_v5_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "mmhub_v9_4.h"
#include "mmhub/mmhub_9_4_1_offset.h"
#include "mmhub/mmhub_9_4_1_sh_mask.h"
#include "mmhub/mmhub_9_4_1_default.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"
#include "soc15.h"
#include "soc15_common.h"
#define MMHUB_NUM_INSTANCES 2
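/* register (dword) offset between the two MMHUB instances' mirrored register blocks */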
#define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000
static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev)
{
/* The base should be the same between the two MMHUBs on Arcturus. Read one here. */
u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE);
u64 top = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP);
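/* FB_LOCATION BASE/TOP are programmed in 16MB (1 << 24) units, hence the shifts below */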
base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
base <<= 24;
top &= VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
top <<= 24;
adev->gmc.fb_start = base;
adev->gmc.fb_end = top;
return base;
}
static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid,
uint32_t vmid, uint64_t value)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
lower_32_bits(value));
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
hub->ctx_addr_distance * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
upper_32_bits(value));
}
static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev,
int hubid)
{
uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
mmhub_v9_4_setup_hubid_vm_pt_regs(adev, hubid, 0, pt_base);
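/* the start/end registers take the GART range as 4K page numbers, split across LO32/HI32 */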
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(adev->gmc.gart_start >> 12));
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(adev->gmc.gart_start >> 44));
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(adev->gmc.gart_end >> 44));
}
static void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++)
mmhub_v9_4_setup_hubid_vm_pt_regs(adev, i, vmid,
page_table_base);
}
static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
int hubid)
{
uint64_t value;
uint32_t tmp;
/* Program the AGP BAR */
WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BASE,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_TOP,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
adev->gmc.agp_end >> 24);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BOT,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
adev->gmc.agp_start >> 24);
if (!amdgpu_sriov_vf(adev)) {
/* Program the system aperture low logical page number. */
WREG32_SOC15_OFFSET(
MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15_OFFSET(
MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(value >> 12));
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(value >> 44));
/* Program "protection fault". */
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
(u32)((u64)adev->dummy_page_addr >> 44));
tmp = RREG32_SOC15_OFFSET(
MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET,
tmp);
}
}
static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
{
uint32_t tmp;
/* Setup TLB control */
tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ENABLE_L1_TLB, 1);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
SYSTEM_ACCESS_MODE, 3);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ATC_EN, 1);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid)
{
uint32_t tmp;
/* Setup L2 cache */
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
ENABLE_L2_CACHE, 1);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
ENABLE_L2_FRAGMENT_PROCESSING, 1);
/* XXX for emulation, refer to closed source code. */
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2,
INVALIDATE_L2_CACHE, 1);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
} else {
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3, BANK_SELECT, 9);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
tmp = mmVML2PF0_VM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4,
VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL4,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev,
int hubid)
{
uint32_t tmp;
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev,
int hubid)
{
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0XFFFFFFFF);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x0000000F);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
}
static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned int num_level, block_size;
uint32_t tmp;
int i;
num_level = adev->vm_manager.num_level;
block_size = adev->vm_manager.block_size;
if (adev->gmc.translate_further)
num_level -= 1;
else
block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_DEPTH,
num_level);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_addr_distance,
lower_32_bits(adev->vm_manager.max_pfn - 1));
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
}
static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev,
int hubid)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
unsigned i;
for (i = 0; i < 18; ++i) {
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->eng_addr_distance,
0xffffffff);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->eng_addr_distance,
0x1f);
}
}
static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
/* GART Enable. */
mmhub_v9_4_init_gart_aperture_regs(adev, i);
mmhub_v9_4_init_system_aperture_regs(adev, i);
mmhub_v9_4_init_tlb_regs(adev, i);
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_init_cache_regs(adev, i);
mmhub_v9_4_enable_system_domain(adev, i);
if (!amdgpu_sriov_vf(adev))
mmhub_v9_4_disable_identity_aperture(adev, i);
mmhub_v9_4_setup_vmid_config(adev, i);
mmhub_v9_4_program_invalidation(adev, i);
}
return 0;
}
static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
u32 tmp;
u32 i, j;
for (j = 0; j < MMHUB_NUM_INSTANCES; j++) {
/* Disable all tables */
for (i = 0; i < AMDGPU_NUM_VMID; i++)
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_CNTL,
j * MMHUB_INSTANCE_REGISTER_OFFSET +
i * hub->ctx_distance, 0);
/* Setup TLB control */
tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
j * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ENABLE_L1_TLB, 0);
tmp = REG_SET_FIELD(tmp,
VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
/* Setup L2 cache */
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
j * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
ENABLE_L2_CACHE, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL,
j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3,
j * MMHUB_INSTANCE_REGISTER_OFFSET, 0);
}
}
/**
* mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
u32 tmp;
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
i * MMHUB_INSTANCE_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
PDE1_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
PDE2_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp,
VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
NACK_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
if (!value) {
tmp = REG_SET_FIELD(tmp,
VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_NO_RETRY_FAULT, 1);
tmp = REG_SET_FIELD(tmp,
VML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_RETRY_FAULT, 1);
}
WREG32_SOC15_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL,
i * MMHUB_INSTANCE_REGISTER_OFFSET, tmp);
}
}
static void mmhub_v9_4_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = {
&adev->vmhub[AMDGPU_MMHUB0(0)], &adev->vmhub[AMDGPU_MMHUB1(0)]};
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
hub[i]->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_inv_eng0_sem =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_inv_eng0_ack =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_ACK) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_context0_cntl =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_CNTL) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_l2_pro_fault_status =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->ctx_distance = mmVML2VC0_VM_CONTEXT1_CNTL -
mmVML2VC0_VM_CONTEXT0_CNTL;
hub[i]->ctx_addr_distance = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
hub[i]->eng_distance = mmVML2VC0_VM_INVALIDATE_ENG1_REQ -
mmVML2VC0_VM_INVALIDATE_ENG0_REQ;
hub[i]->eng_addr_distance = mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
}
}
static void mmhub_v9_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data, def1, data1;
int i, j;
int dist = mmDAGB1_CNTL_MISC2 - mmDAGB0_CNTL_MISC2;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
mmATCL2_0_ATC_L2_MISC_CG,
i * MMHUB_INSTANCE_REGISTER_OFFSET);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
else
data &= ~ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK;
if (def != data)
WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
for (j = 0; j < 5; j++) {
def1 = data1 = RREG32_SOC15_OFFSET(MMHUB, 0,
mmDAGB0_CNTL_MISC2,
i * MMHUB_INSTANCE_REGISTER_OFFSET +
j * dist);
if (enable &&
(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
data1 &=
~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
} else {
data1 |=
(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
}
if (def1 != data1)
WREG32_SOC15_OFFSET(MMHUB, 0,
mmDAGB0_CNTL_MISC2,
i * MMHUB_INSTANCE_REGISTER_OFFSET +
j * dist, data1);
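/* stop after the fourth DAGB on the second hub, which presumably exposes one fewer instance */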
if (i == 1 && j == 3)
break;
}
}
}
static void mmhub_v9_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
int i;
for (i = 0; i < MMHUB_NUM_INSTANCES; i++) {
def = data = RREG32_SOC15_OFFSET(MMHUB, 0,
mmATCL2_0_ATC_L2_MISC_CG,
i * MMHUB_INSTANCE_REGISTER_OFFSET);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
else
data &= ~ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
if (def != data)
WREG32_SOC15_OFFSET(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG,
i * MMHUB_INSTANCE_REGISTER_OFFSET, data);
}
}
static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
mmhub_v9_4_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v9_4_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data, data1;
if (amdgpu_sriov_vf(adev))
*flags = 0;
/* AMD_CG_SUPPORT_MC_MGCG */
data = RREG32_SOC15(MMHUB, 0, mmATCL2_0_ATC_L2_MISC_CG);
data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
if ((data & ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK) &&
!(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
*flags |= AMD_CG_SUPPORT_MC_MGCG;
/* AMD_CG_SUPPORT_MC_LS */
if (data & ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_MC_LS;
}
static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = {
/* MMHUB Range 0 */
{ "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
},
/* MMHUB Range 1 */
{ "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
},
	/* MMHUB Range 2 */
{ "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
},
	/* MMHUB Range 3 */
{ "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
},
/* MMHUB Range 4 */
{ "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
},
	/* MMHUB Range 5 */
{ "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
},
/* MMHUB Range 6 */
{ "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT),
},
	/* MMHUB Range 7 */
{ "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
},
{ "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
},
{ "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
},
{ "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT),
},
{ "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT),
},
{ "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0,
},
{ "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0,
},
{ "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
},
{ "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
},
{ "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
},
{ "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0,
},
{ "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
},
{ "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
},
{ "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
},
{ "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
},
{ "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT),
},
{ "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
},
{ "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3),
0, 0,
SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
},
{ "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT),
},
{ "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT),
},
{ "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT),
},
{ "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT),
}
};
static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
};
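/* Decode a single EDC counter register: walk mmhub_v9_4_ras_fields for entries
 * matching @reg and accumulate their SEC/DED fields from @value into
 * @sec_count and @ded_count.
 */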
static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
const struct soc15_reg_entry *reg,
uint32_t value,
uint32_t *sec_count,
uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_ras_fields); i++) {
if (mmhub_v9_4_ras_fields[i].reg_offset != reg->reg_offset)
continue;
sec_cnt = (value &
mmhub_v9_4_ras_fields[i].sec_count_mask) >>
mmhub_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
mmhub_v9_4_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = (value &
mmhub_v9_4_ras_fields[i].ded_count_mask) >>
mmhub_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
mmhub_v9_4_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
}
}
return 0;
}
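/* Sum correctable (SEC) and uncorrectable (DED) errors across all MMEA EDC
 * counter registers and report them through @ras_error_status.
 */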
static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
uint32_t i;
uint32_t reg_value;
err_data->ue_count = 0;
err_data->ce_count = 0;
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) {
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
if (reg_value)
mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
}
static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
uint32_t i;
/* read back edc counter registers to reset the counters to 0 */
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++)
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
}
}
static const struct soc15_reg_entry mmhub_v9_4_err_status_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_ERR_STATUS), 0, 0, 0 },
};
static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
{
int i;
uint32_t reg_value;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
return;
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
			/* SDP read/write error/parity error in FUE_IS_FATAL mode
			 * can cause a system fatal error on Arcturus. Harvest the
			 * error status before GPU reset.
			 */
dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
i, reg_value);
}
}
}
const struct amdgpu_ras_block_hw_ops mmhub_v9_4_ras_hw_ops = {
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
.reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
.query_ras_error_status = mmhub_v9_4_query_ras_error_status,
};
struct amdgpu_mmhub_ras mmhub_v9_4_ras = {
.ras_block = {
.hw_ops = &mmhub_v9_4_ras_hw_ops,
},
};
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.get_fb_location = mmhub_v9_4_get_fb_location,
.init = mmhub_v9_4_init,
.gart_enable = mmhub_v9_4_gart_enable,
.set_fault_enable_default = mmhub_v9_4_set_fault_enable_default,
.gart_disable = mmhub_v9_4_gart_disable,
.set_clockgating = mmhub_v9_4_set_clockgating,
.get_clockgating = mmhub_v9_4_get_clockgating,
.setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"
#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "nbif/nbif_6_1_offset.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
//UVD_PG0_CC_UVD_HARVESTING
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
#define UVD7_MAX_HW_INSTANCES_VEGA20 2
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v7_0_start(struct amdgpu_device *adev);
static void uvd_v7_0_stop(struct amdgpu_device *adev);
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev);
static int amdgpu_ih_clientid_uvds[] = {
SOC15_IH_CLIENTID_UVD,
SOC15_IH_CLIENTID_UVD1
};
/**
* uvd_v7_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
* uvd_v7_0_enc_ring_get_rptr - get enc read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware enc read pointer
*/
static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
else
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
}
/**
* uvd_v7_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
* uvd_v7_0_enc_ring_get_wptr - get enc write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware enc write pointer
*/
static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
else
return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
}
/**
* uvd_v7_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
* uvd_v7_0_enc_ring_set_wptr - set enc write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the enc write pointer to the hardware
*/
static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
return;
}
if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR,
lower_32_bits(ring->wptr));
else
WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2,
lower_32_bits(ring->wptr));
}
/**
* uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working
*
* @ring: the engine to test on
*
*/
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr;
unsigned i;
int r;
if (amdgpu_sriov_vf(adev))
return 0;
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
rptr = amdgpu_ring_get_rptr(ring);
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
/**
* uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg
*
* @ring: ring we should submit the msg to
* @handle: session handle to use
* @bo: amdgpu object for which we query the offset
* @fence: optional fence to return
*
* Open up a stream for HW test
*/
static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
ib->ptr[ib->length_dw++] = 0x0000001c;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008;
ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
amdgpu_job_free(job);
return r;
}
/**
* uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
*
* @ring: ring we should submit the msg to
* @handle: session handle to use
* @bo: amdgpu object for which we query the offset
* @fence: optional fence to return
*
* Close up a stream for HW test or if userspace failed to do so
*/
static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002;
ib->ptr[ib->length_dw++] = 0x0000001c;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008;
ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
amdgpu_job_free(job);
return r;
}
/**
* uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working
*
* @ring: the engine to test on
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
*/
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
long r;
r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0)
r = -ETIMEDOUT;
else if (r > 0)
r = 0;
error:
dma_fence_put(fence);
return r;
}
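/* On Vega20 the harvesting fuses may disable either UVD instance; record the
 * harvest mask and bail out if both instances are gone.
 */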
static int uvd_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->asic_type == CHIP_VEGA20) {
u32 harvest;
int i;
adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
adev->uvd.harvest_config |= 1 << i;
}
}
if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
AMDGPU_UVD_HARVEST_UVD1))
/* both instances are harvested, disable the block */
return -ENOENT;
} else {
adev->uvd.num_uvd_inst = 1;
}
if (amdgpu_sriov_vf(adev))
adev->uvd.num_enc_rings = 1;
else
adev->uvd.num_enc_rings = 2;
uvd_v7_0_set_ring_funcs(adev);
uvd_v7_0_set_enc_ring_funcs(adev);
uvd_v7_0_set_irq_funcs(adev);
return 0;
}
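/* Register the UVD system-message and encode-ring interrupt sources, set up
 * the ucode entries for PSP firmware loading, and initialize the decode and
 * encode rings.
 */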
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
if (r)
return r;
/* UVD ENC TRAP */
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
if (r)
return r;
}
}
r = amdgpu_uvd_sw_init(adev);
if (r)
return r;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD;
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) {
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1;
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
}
DRM_INFO("PSP loading UVD firmware\n");
}
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
				/* currently only the first encoding ring is used under
				 * SRIOV, so point the other, unused rings at an unused
				 * doorbell location.
*/
if (i == 0)
ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
}
r = amdgpu_uvd_resume(adev);
if (r)
return r;
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
return r;
}
static int uvd_v7_0_sw_fini(void *handle)
{
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_virt_free_mm_table(adev);
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
return amdgpu_uvd_sw_fini(adev);
}
/**
* uvd_v7_0_hw_init - start and test UVD block
*
* @handle: handle used to pass amdgpu_device pointer
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
static int uvd_v7_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
uint32_t tmp;
int i, j, r;
if (amdgpu_sriov_vf(adev))
r = uvd_v7_0_sriov_start(adev);
else
r = uvd_v7_0_start(adev);
if (r)
goto done;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.harvest_config & (1 << j))
continue;
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
goto done;
}
tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(SOC15_REG_OFFSET(UVD, j,
mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
/* Clear timeout status bits */
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
mmUVD_SEMA_TIMEOUT_STATUS), 0));
amdgpu_ring_write(ring, 0x8);
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j,
mmUVD_SEMA_CNTL), 0));
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
}
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
}
}
done:
if (!r)
DRM_INFO("UVD and UVD ENC initialized successfully.\n");
return r;
}
/**
* uvd_v7_0_hw_fini - stop the hardware block
*
* @handle: handle used to pass amdgpu_device pointer
*
* Stop the UVD block, mark ring as not ready any more
*/
static int uvd_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->uvd.idle_work);
if (!amdgpu_sriov_vf(adev))
uvd_v7_0_stop(adev);
else {
/* full access mode, so don't touch any UVD register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
return 0;
}
static int uvd_v7_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
* - enable powergating
* - enable clockgating
* - disable dpm
*
* TODO: to align with the VCN implementation, move the
* jobs for clockgating/powergating/dpm setting to
* ->set_powergating_state().
*/
cancel_delayed_work_sync(&adev->uvd.idle_work);
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
}
r = uvd_v7_0_hw_fini(adev);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
static int uvd_v7_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
return uvd_v7_0_hw_init(adev);
}
/**
* uvd_v7_0_mc_resume - memory controller programming
*
* @adev: amdgpu_device pointer
*
 * Let the UVD memory controller know its offsets
*/
static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
uint32_t offset;
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
i == 0 ?
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo :
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo);
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
i == 0 ?
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi :
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi);
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst[i].gpu_addr));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.inst[i].gpu_addr));
offset = size;
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE);
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21));
WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2,
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
}
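/* Hand the prepared init table to the MMSCH (the MM scheduler used under
 * SRIOV): program the table location and size, kick off initialization and
 * poll the mailbox response until it signals completion.
 */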
static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
struct amdgpu_mm_table *table)
{
uint32_t data = 0, loop;
uint64_t addr = table->gpu_addr;
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
uint32_t size;
int i;
size = header->header_size + header->vce_table_size + header->uvd_table_size;
/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
/* 2, update vmid of descriptor */
data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID);
data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data);
/* 3, notify mmsch about the size of this descriptor */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
*adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0;
adev->uvd.inst[i].ring_enc[0].wptr = 0;
adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
}
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
loop = 1000;
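/* poll for up to ~10 ms; 0x10000002 is taken to be the MMSCH
 * "descriptor consumed" acknowledgement */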
while ((data & 0x10000002) != 0x10000002) {
udelay(10);
data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP);
loop--;
if (!loop)
break;
}
if (!loop) {
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
return 0;
}
static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint32_t offset, size, tmp;
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} };
struct mmsch_v1_0_cmd_end end = { {0} };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
uint8_t i = 0;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
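/*
 * Under SR-IOV the guest cannot program the UVD registers directly; the
 * writes below are recorded into a shared descriptor table which the
 * MMSCH firmware replays on the guest's behalf.
 */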
if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) {
header->version = MMSCH_VERSION;
header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
if (header->vce_table_offset == 0 && header->vce_table_size == 0)
header->uvd_table_offset = header->header_size;
else
header->uvd_table_offset = header->vce_table_size + header->vce_table_offset;
init_table += header->uvd_table_offset;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
ring = &adev->uvd.inst[i].ring;
ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
0xFFFFFFFF, 0x00000004);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
offset = 0;
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->uvd.inst[i].gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.inst[i].gpu_addr));
offset = size;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->uvd.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
/* mc resume end*/
/* disable clock gating */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL),
~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0);
/* disable interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
~UVD_MASTINT_EN__VCPU_EN_MASK, 0);
/* stall UMC and register bus before resetting VCPU */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* put LMI, VCPU, RBC etc... into reset */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
(uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK));
/* initialize UVD memory controller */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL),
(uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__REQ_MODE_MASK |
0x00100000L));
/* take all subblocks out of reset, except VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET),
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
/* enable VCPU clock */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable master interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0);
/* force RBC into idle state */
size = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
ring = &adev->uvd.inst[i].ring_enc[0];
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4);
/* boot up the VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0);
/* enable UMC */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02);
}
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->uvd_table_size = table_size;
}
return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
* uvd_v7_0_start - start UVD block
*
* @adev: amdgpu_device pointer
*
* Set up and start the UVD block
*/
static int uvd_v7_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl;
int i, j, k, r;
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
if (adev->uvd.harvest_config & (1 << k))
continue;
/* disable DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}
/* disable byte swapping */
lmi_swap_cntl = 0;
mp_swap_cntl = 0;
uvd_v7_0_mc_resume(adev);
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
if (adev->uvd.harvest_config & (1 << k))
continue;
ring = &adev->uvd.inst[k].ring;
/* disable clock gating */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
/* disable interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* stall UMC and register bus before resetting VCPU */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2),
UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
/* initialize UVD memory controller */
WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL,
(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__REQ_MODE_MASK |
0x00100000L);
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
lmi_swap_cntl = 0xa;
mp_swap_cntl = 0;
#endif
WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040);
WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0);
WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040);
WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0);
WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0);
WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88);
/* take all subblocks out of reset, except VCPU */
WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET,
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
/* enable VCPU clock */
WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL,
UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* boot up the VCPU */
WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0);
mdelay(10);
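/* poll UVD_STATUS for the VCPU-ready bit, retrying the boot up to ten
 * times with a VCPU soft reset in between attempts */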
for (i = 0; i < 10; ++i) {
uint32_t status;
for (j = 0; j < 100; ++j) {
status = RREG32_SOC15(UVD, k, mmUVD_STATUS);
if (status & 2)
break;
mdelay(10);
}
r = 0;
if (status & 2)
break;
DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET),
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(10);
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0,
~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(10);
r = -1;
}
if (r) {
DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
return r;
}
/* enable master interrupt */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN),
(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp);
/* set the write pointer delay */
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
ring = &adev->uvd.inst[k].ring_enc[0];
WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
ring = &adev->uvd.inst[k].ring_enc[1];
WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4);
}
return 0;
}
/**
* uvd_v7_0_stop - stop UVD block
*
* @adev: amdgpu_device pointer
*
* stop the UVD block
*/
static void uvd_v7_0_stop(struct amdgpu_device *adev)
{
uint8_t i = 0;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
/* force RBC into idle state */
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
/* Stall UMC and register bus before resetting VCPU */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2),
UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
mdelay(1);
/* put VCPU into reset */
WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET,
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
/* disable VCPU clock */
WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0);
/* Unstall UMC and register bus */
WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
}
}
/**
* uvd_v7_0_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Write a fence and a trap command to the ring.
*/
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
struct amdgpu_device *adev = ring->adev;
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 2);
}
/**
* uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Write an enc fence and a trap command to the ring.
*/
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
amdgpu_ring_write(ring, addr);
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
/**
* uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing
*
* @ring: amdgpu_ring pointer
*/
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* The firmware doesn't seem to like touching registers at this point. */
}
/**
* uvd_v7_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
*
* Test if we can successfully write to the context register
*/
static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
unsigned i;
int r;
WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
/**
* uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
*
* @p: the CS parser with the IBs
* @job: which job this ib is in
* @ib: which IB to patch
*
*/
static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
unsigned i;
/* No patching necessary for the first instance */
if (!ring->me)
return 0;
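/* the IB is a stream of PACKET0 register/value pairs addressed to
 * instance 0; rebase each register offset into instance 1's segment */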
for (i = 0; i < ib->length_dw; i += 2) {
uint32_t reg = amdgpu_ib_get_value(ib, i);
reg -= p->adev->reg_offset[UVD_HWIP][0][1];
reg += p->adev->reg_offset[UVD_HWIP][1][1];
amdgpu_ib_set_value(ib, i, reg);
}
return 0;
}
/**
* uvd_v7_0_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job to retrieve vmid from
* @ib: indirect buffer to execute
* @flags: unused
*
* Write ring commands to execute the indirect buffer
*/
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0));
amdgpu_ring_write(ring, ib->length_dw);
}
/**
* uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job to retrieve vmid from
* @ib: indirect buffer to execute
* @flags: unused
*
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 8);
}
static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 12);
}
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for reg writes */
data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_device *adev = ring->adev;
int i;
WARN_ON(ring->wptr % 2 || count % 2);
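/* each NOP is a two-dword PACKET0 write to UVD_NO_OP, hence count / 2 */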
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0));
amdgpu_ring_write(ring, 0);
}
}
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}
static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val,
uint32_t mask)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring, val);
}
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for reg writes */
uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
vmid * hub->ctx_addr_distance,
lower_32_bits(pd_addr), 0xffffffff);
}
static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, val);
}
#if 0
static bool uvd_v7_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
static int uvd_v7_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
if (uvd_v7_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v7_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
(RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) &
AMDGPU_UVD_STATUS_BUSY_MASK))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) {
adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->uvd.inst[ring->me].srbm_soft_reset = 0;
return false;
}
}
static int uvd_v7_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.inst[ring->me].srbm_soft_reset)
return 0;
uvd_v7_0_stop(adev);
return 0;
}
static int uvd_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
if (!adev->uvd.inst[ring->me].srbm_soft_reset)
return 0;
srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int uvd_v7_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.inst[ring->me].srbm_soft_reset)
return 0;
mdelay(5);
return uvd_v7_0_start(adev);
}
#endif
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
// TODO
return 0;
}
static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t ip_instance;
switch (entry->client_id) {
case SOC15_IH_CLIENTID_UVD:
ip_instance = 0;
break;
case SOC15_IH_CLIENTID_UVD1:
ip_instance = 1;
break;
default:
DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
return 0;
}
DRM_DEBUG("IH: UVD TRAP\n");
switch (entry->src_id) {
case 124:
amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
break;
case 119:
amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
break;
case 120:
if (!amdgpu_sriov_vf(adev))
amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, data2, suvd_flags;
data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL);
data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG_MODE_MASK |
UVD_CGC_CTRL__JPEG2_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
data1 |= suvd_flags;
WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data);
WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0);
WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2);
}
static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, cgc_flags, suvd_flags;
data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE);
data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE);
cgc_flags = UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK |
UVD_CGC_GATE__JPEG_MASK |
UVD_CGC_GATE__JPEG2_MASK;
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= cgc_flags;
data1 |= suvd_flags;
WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data);
WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1);
}
static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
if (enable)
tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
else
tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
static int uvd_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
uvd_v7_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
if (enable) {
/* disable HW gating and enable Sw gating */
uvd_v7_0_set_sw_clock_gating(adev);
} else {
/* wait for STATUS to clear */
if (uvd_v7_0_wait_for_idle(handle))
return -EBUSY;
/* enable HW gates because UVD is idle */
/* uvd_v7_0_set_hw_clock_gating(adev); */
}
return 0;
}
static int uvd_v7_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
* That's done in the dpm code via the SMC. This
* just re-inits the block as necessary. The actual
* gating still happens in the dpm code. We should
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
if (state == AMD_PG_STATE_GATE) {
uvd_v7_0_stop(adev);
return 0;
} else {
return uvd_v7_0_start(adev);
}
}
#endif
static int uvd_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
/* needed for driver unload */
return 0;
}
const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
.name = "uvd_v7_0",
.early_init = uvd_v7_0_early_init,
.late_init = NULL,
.sw_init = uvd_v7_0_sw_init,
.sw_fini = uvd_v7_0_sw_fini,
.hw_init = uvd_v7_0_hw_init,
.hw_fini = uvd_v7_0_hw_fini,
.suspend = uvd_v7_0_suspend,
.resume = uvd_v7_0_resume,
.is_idle = NULL /* uvd_v7_0_is_idle */,
.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
.soft_reset = NULL /* uvd_v7_0_soft_reset */,
.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
.set_clockgating_state = uvd_v7_0_set_clockgating_state,
.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
.emit_frame_size =
6 + /* hdp invalidate */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* uvd_v7_0_ring_emit_vm_flush */
14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
.emit_ib = uvd_v7_0_ring_emit_ib,
.emit_fence = uvd_v7_0_ring_emit_fence,
.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
.test_ring = uvd_v7_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = uvd_v7_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.emit_wreg = uvd_v7_0_ring_emit_wreg,
.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD_ENC,
.align_mask = 0x3f,
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = uvd_v7_0_enc_ring_get_rptr,
.get_wptr = uvd_v7_0_enc_ring_get_wptr,
.set_wptr = uvd_v7_0_enc_ring_set_wptr,
.emit_frame_size =
3 + 3 + /* hdp flush / invalidate */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v7_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
.emit_ib = uvd_v7_0_enc_ring_emit_ib,
.emit_fence = uvd_v7_0_enc_ring_emit_fence,
.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
.test_ring = uvd_v7_0_enc_ring_test_ring,
.test_ib = uvd_v7_0_enc_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = uvd_v7_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
if (adev->uvd.harvest_config & (1 << i))
continue;
adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
adev->uvd.inst[i].ring.me = i;
DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
}
}
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i, j;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
adev->uvd.inst[j].ring_enc[i].me = j;
}
DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
}
}
static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
.set = uvd_v7_0_set_interrupt_state,
.process = uvd_v7_0_process_interrupt,
};
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
if (adev->uvd.harvest_config & (1 << i))
continue;
adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
}
}
const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &uvd_v7_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
#include "amdgpu_atombios.h"
#include "ivsrcid/ivsrcid_vislands30.h"
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
static const u32 golden_settings_iceland_a11[] = {
mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};
static const u32 iceland_mgcg_cgcg_init[] = {
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
amdgpu_device_program_register_sequence(adev,
iceland_mgcg_cgcg_init,
ARRAY_SIZE(iceland_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_iceland_a11,
ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
}
}
static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
/* Block CPU access */
WREG32(mmBIF_FB_EN, 0);
/* blackout the MC */
blackout = REG_SET_FIELD(blackout,
MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
/* wait for the MC to settle */
udelay(100);
}
static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
/* unblackout the MC */
tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
/* allow CPU access */
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
}
/**
* gmc_v7_0_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
int err;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_BONAIRE:
chip_name = "bonaire";
break;
case CHIP_HAWAII:
chip_name = "hawaii";
break;
case CHIP_TOPAZ:
chip_name = "topaz";
break;
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
return 0;
default:
return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
/**
* gmc_v7_0_mc_load_microcode - load MC ucode into the hw
*
* @adev: amdgpu_device pointer
*
* Load the GDDR MC ucode into the hw (CIK).
* Returns 0 on success, error on failure.
*/
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
u32 running;
int i, ucode_size, regs_size;
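/* the MC firmware image carries an io-debug array of (index, data)
 * register pairs followed by the SEQ ucode itself */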
if (!adev->gmc.fw)
return -EINVAL;
hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
amdgpu_ucode_print_mc_hdr(&hdr->header);
adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
io_mc_regs = (const __le32 *)
(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
fw_data = (const __le32 *)
(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
if (running == 0) {
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
}
/* load the MC ucode */
for (i = 0; i < ucode_size; i++)
WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));
/* put the engine back into the active state */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
/* wait for training to complete */
for (i = 0; i < adev->usec_timeout; i++) {
if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
break;
udelay(1);
}
for (i = 0; i < adev->usec_timeout; i++) {
if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
break;
udelay(1);
}
}
return 0;
}
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
{
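/* MC_VM_FB_LOCATION stores the framebuffer base in 16 MB units */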
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
/**
* gmc_v7_0_mc_program - program the GPU memory controller
*
* @adev: amdgpu_device pointer
*
* Set the location of vram, gart, and AGP in the GPU's
* physical address space (CIK).
*/
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
u32 tmp;
int i, j;
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
WREG32((0xb05 + j), 0x00000000);
WREG32((0xb06 + j), 0x00000000);
WREG32((0xb07 + j), 0x00000000);
WREG32((0xb08 + j), 0x00000000);
WREG32((0xb09 + j), 0x00000000);
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timed out!\n");
if (adev->mode_info.num_crtc) {
/* Lockout access through VGA aperture*/
tmp = RREG32(mmVGA_HDP_CONTROL);
tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
WREG32(mmVGA_HDP_CONTROL, tmp);
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timed out!\n");
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
tmp = RREG32(mmHDP_MISC_CNTL);
tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
WREG32(mmHDP_MISC_CNTL, tmp);
tmp = RREG32(mmHDP_HOST_PATH_CNTL);
WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
* gmc_v7_0_mc_init - initialize the memory controller driver params
*
* @adev: amdgpu_device pointer
*
* Look up the amount of vram, vram width, and decide how to place
* vram and gart within the GPU's physical address space (CIK).
* Returns 0 for success.
*/
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
int r;
adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
if (!adev->gmc.vram_width) {
u32 tmp;
int chansize, numchan;
/* Get VRAM information */
tmp = RREG32(mmMC_ARB_RAMCFG);
if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
chansize = 64;
else
chansize = 32;
tmp = RREG32(mmMC_SHARED_CHMAP);
switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
case 4:
numchan = 3;
break;
case 5:
numchan = 6;
break;
case 6:
numchan = 10;
break;
case 7:
numchan = 12;
break;
case 8:
numchan = 16;
break;
}
adev->gmc.vram_width = numchan * chansize;
}
/* CONFIG_MEMSIZE reports the VRAM size in MB */
adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_device_resize_fb_bar(adev);
if (r)
return r;
}
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
if ((adev->flags & AMD_IS_APU) &&
adev->gmc.real_vram_size > adev->gmc.aper_size &&
!amdgpu_passthrough(adev)) {
adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
#endif
adev->gmc.visible_vram_size = adev->gmc.aper_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
case CHIP_TOPAZ: /* no MM engines */
default:
adev->gmc.gart_size = 256ULL << 20;
break;
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */
case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */
case CHIP_KABINI: /* UVD, VCE do not support GPUVM */
case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
adev->gmc.gart_size = 1024ULL << 20;
break;
#endif
}
} else {
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
return 0;
}
/**
* gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
*
* @adev: amdgpu_device pointer
* @pasid: pasid to be flushed
* @flush_type: type of flush
* @all_hub: flush all hubs
* @inst: is used to select which instance of KIQ to use for the invalidation
*
* Flush the TLB for the requested pasid.
*/
static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
bool all_hub, uint32_t inst)
{
int vmid;
unsigned int tmp;
if (amdgpu_in_reset(adev))
return -EIO;
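/* there is no flush-by-pasid request on this generation; walk the ATC
 * VMID->PASID mapping registers and invalidate the VMID carrying this
 * pasid */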
for (vmid = 1; vmid < 16; vmid++) {
tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
RREG32(mmVM_INVALIDATE_RESPONSE);
break;
}
}
return 0;
}
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
* VMIDs 1-15 are used for userspace clients and are handled
* by the amdgpu vm/hsa code.
*/
/**
* gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
* @vmhub: which hub to flush
* @flush_type: type of flush
*
* Flush the TLB for the requested page table (CIK).
*/
static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
uint32_t reg;
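/* the page table base registers for VMIDs 0-7 and 8-15 live in two
 * separate register banks */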
if (vmid < 8)
reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
else
reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
/* bits 0-15 are the VM contexts0-15 */
amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
return pd_addr;
}
static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
unsigned int pasid)
{
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
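/* CIK PDEs are plain 4K-aligned physical addresses within a 40-bit
 * space; no extra flags are applied, just sanity-check the address */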
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
/**
* gmc_v7_0_set_fault_enable_default - update VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/**
* gmc_v7_0_set_prt - set PRT VM fault
*
* @adev: amdgpu_device pointer
* @enable: enable/disable VM fault handling for PRT
*/
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
uint32_t tmp;
if (enable && !adev->gmc.prt_warning) {
dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
adev->gmc.prt_warning = true;
}
tmp = RREG32(mmVM_PRT_CNTL);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
L2_CACHE_STORE_INVALID_ENTRIES, enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
L1_TLB_STORE_INVALID_ENTRIES, enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
MASK_PDE0_FAULT, enable);
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
} else {
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
}
}
/**
* gmc_v7_0_gart_enable - gart enable
*
* @adev: amdgpu_device pointer
*
* This sets up the TLBs, programs the page tables for VMID0,
* sets up the hw for VMIDs 1-15 which are allocated on
* demand, and sets up the global locations for the LDS, GDS,
* and GPUVM for FSA64 clients (CIK).
* Returns 0 for success, errors for failure.
*/
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
uint64_t table_addr;
u32 tmp, field;
int i;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
/* Setup TLB control */
tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
tmp = RREG32(mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
WREG32(mmVM_L2_CNTL, tmp);
tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32(mmVM_L2_CNTL2, tmp);
field = adev->vm_manager.fragment_size;
tmp = RREG32(mmVM_L2_CNTL3);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
WREG32(mmVM_CONTEXT0_CNTL, tmp);
WREG32(0x575, 0);
WREG32(0x576, 0);
WREG32(0x577, 0);
/* empty context1-15 */
/* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
for (i = 1; i < AMDGPU_NUM_VMID; i++) {
if (i < 8)
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
table_addr >> 12);
else
WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
table_addr >> 12);
}
/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v7_0_set_fault_enable_default(adev, false);
else
gmc_v7_0_set_fault_enable_default(adev, true);
if (adev->asic_type == CHIP_KAVERI) {
tmp = RREG32(mmCHUB_CONTROL);
tmp &= ~BYPASS_VM;
WREG32(mmCHUB_CONTROL, tmp);
}
gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
}
static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
int r;
if (adev->gart.bo) {
WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
r = amdgpu_gart_init(adev);
if (r)
return r;
adev->gart.table_size = adev->gart.num_gpu_pages * 8;
adev->gart.gart_pte_flags = 0;
return amdgpu_gart_table_vram_alloc(adev);
}
/**
* gmc_v7_0_gart_disable - gart disable
*
* @adev: amdgpu_device pointer
*
* This disables all VM page tables (CIK).
*/
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
u32 tmp;
/* Disable all tables */
WREG32(mmVM_CONTEXT0_CNTL, 0);
WREG32(mmVM_CONTEXT1_CNTL, 0);
/* Setup TLB control */
tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
/* Setup L2 cache */
tmp = RREG32(mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32(mmVM_L2_CNTL, tmp);
WREG32(mmVM_L2_CNTL2, 0);
}
/**
* gmc_v7_0_vm_decode_fault - print human readable fault info
*
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
* @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
* @pasid: debug logging only - no functional use
*
* Print human readable fault information (CIK).
*/
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
u32 addr, u32 mc_client, unsigned int pasid)
{
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
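/* MCCLIENT packs a four-character ASCII client tag into 32 bits */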
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
u32 mc_id;
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, pasid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
"write" : "read", block, mc_client, mc_id);
}
static const u32 mc_cg_registers[] = {
mmMC_HUB_MISC_HUB_CG,
mmMC_HUB_MISC_SIP_CG,
mmMC_HUB_MISC_VM_CG,
mmMC_XPB_CLK_GAT,
mmATC_MISC_CG,
mmMC_CITF_MISC_WR_CG,
mmMC_CITF_MISC_RD_CG,
mmMC_CITF_MISC_VM_CG,
mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
ATC_MISC_CG__MEM_LS_ENABLE_MASK,
MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
VM_L2_CG__MEM_LS_ENABLE_MASK,
};
static const u32 mc_cg_en[] = {
MC_HUB_MISC_HUB_CG__ENABLE_MASK,
MC_HUB_MISC_SIP_CG__ENABLE_MASK,
MC_HUB_MISC_VM_CG__ENABLE_MASK,
MC_XPB_CLK_GAT__ENABLE_MASK,
ATC_MISC_CG__ENABLE_MASK,
MC_CITF_MISC_WR_CG__ENABLE_MASK,
MC_CITF_MISC_RD_CG__ENABLE_MASK,
MC_CITF_MISC_VM_CG__ENABLE_MASK,
VM_L2_CG__ENABLE_MASK,
};
static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
bool enable)
{
int i;
u32 orig, data;
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
if (data != orig)
WREG32(mc_cg_registers[i], data);
}
}
static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
bool enable)
{
int i;
u32 orig, data;
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
if (data != orig)
WREG32(mc_cg_registers[i], data);
}
}
static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
} else {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
}
if (orig != data)
WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
if (orig != data)
WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
orig = data = RREG32(mmHDP_MEM_POWER_LS);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
switch (mc_seq_vram_type) {
case MC_SEQ_MISC0__MT__GDDR1:
return AMDGPU_VRAM_TYPE_GDDR1;
case MC_SEQ_MISC0__MT__DDR2:
return AMDGPU_VRAM_TYPE_DDR2;
case MC_SEQ_MISC0__MT__GDDR3:
return AMDGPU_VRAM_TYPE_GDDR3;
case MC_SEQ_MISC0__MT__GDDR4:
return AMDGPU_VRAM_TYPE_GDDR4;
case MC_SEQ_MISC0__MT__GDDR5:
return AMDGPU_VRAM_TYPE_GDDR5;
case MC_SEQ_MISC0__MT__HBM:
return AMDGPU_VRAM_TYPE_HBM;
case MC_SEQ_MISC0__MT__DDR3:
return AMDGPU_VRAM_TYPE_DDR3;
default:
return AMDGPU_VRAM_TYPE_UNKNOWN;
}
}
static int gmc_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v7_0_set_gmc_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
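	/* 4GB shared aperture followed immediately by a 4GB private aperture */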
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
adev->gmc.private_aperture_start =
adev->gmc.shared_aperture_end + 1;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
return 0;
}
static int gmc_v7_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
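		/* VGA is disabled: size the reservation from the active
		 * viewport at 4 bytes per pixel
		 */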
u32 viewport = RREG32(mmVIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
return size;
}
static int gmc_v7_0_sw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
}
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
if (r)
return r;
	/* Adjust VM size here.
	 * Currently set to 64GB ((1 << 24) 4k pages) by default.
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
*/
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
pr_warn("No suitable DMA available\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
r = gmc_v7_0_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load mc firmware!\n");
return r;
}
r = gmc_v7_0_mc_init(adev);
if (r)
return r;
amdgpu_gmc_get_vbios_allocations(adev);
/* Memory manager */
r = amdgpu_bo_init(adev);
if (r)
return r;
r = gmc_v7_0_gart_init(adev);
if (r)
return r;
/*
* number of VMs
* VMID 0 is reserved for System
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.first_kfd_vmid = 8;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
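		/* MC_VM_FB_OFFSET is presumably in 4MB units; shift to a byte address */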
tmp <<= 22;
adev->vm_manager.vram_base_offset = tmp;
} else {
adev->vm_manager.vram_base_offset = 0;
}
adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
static int gmc_v7_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
static int gmc_v7_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v7_0_init_golden_registers(adev);
gmc_v7_0_mc_program(adev);
if (!(adev->flags & AMD_IS_APU)) {
r = gmc_v7_0_mc_load_microcode(adev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
}
r = gmc_v7_0_gart_enable(adev);
if (r)
return r;
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
}
static int gmc_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v7_0_gart_disable(adev);
return 0;
}
static int gmc_v7_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v7_0_hw_fini(adev);
return 0;
}
static int gmc_v7_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = gmc_v7_0_hw_init(adev);
if (r)
return r;
amdgpu_vmid_reset_all(adev);
return 0;
}
static bool gmc_v7_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
return false;
return true;
}
static int gmc_v7_0_wait_for_idle(void *handle)
{
unsigned int i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK |
SRBM_STATUS__MCD_BUSY_MASK |
SRBM_STATUS__VMC_BUSY_MASK);
if (!tmp)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int gmc_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
if (!(adev->flags & AMD_IS_APU))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
gmc_v7_0_mc_stop(adev);
if (gmc_v7_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
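		/* read back to post the write */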
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
gmc_v7_0_mc_resume(adev);
udelay(50);
}
return 0;
}
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
enum amdgpu_interrupt_state state)
{
u32 tmp;
u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* system context */
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp &= ~bits;
WREG32(mmVM_CONTEXT0_CNTL, tmp);
/* VMs */
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp &= ~bits;
WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* system context */
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp |= bits;
WREG32(mmVM_CONTEXT0_CNTL, tmp);
/* VMs */
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp |= bits;
WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
default:
break;
}
return 0;
}
static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u32 addr, status, mc_client, vmid;
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
/* reset addr and status */
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v7_0_set_fault_enable_default(adev, false);
if (printk_ratelimit()) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data[0]);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
entry->pasid);
}
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
&& !atomic_read(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
info->vmid = vmid;
info->mc_id = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
info->status = status;
info->page_addr = addr;
info->prot_valid = protections & 0x7 ? true : false;
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
mb();
atomic_set(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
}
static int gmc_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (state == AMD_CG_STATE_GATE)
gate = true;
if (!(adev->flags & AMD_IS_APU)) {
gmc_v7_0_enable_mc_mgcg(adev, gate);
gmc_v7_0_enable_mc_ls(adev, gate);
}
gmc_v7_0_enable_bif_mgls(adev, gate);
gmc_v7_0_enable_hdp_mgcg(adev, gate);
gmc_v7_0_enable_hdp_ls(adev, gate);
return 0;
}
static int gmc_v7_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
.sw_init = gmc_v7_0_sw_init,
.sw_fini = gmc_v7_0_sw_fini,
.hw_init = gmc_v7_0_hw_init,
.hw_fini = gmc_v7_0_hw_fini,
.suspend = gmc_v7_0_suspend,
.resume = gmc_v7_0_resume,
.is_idle = gmc_v7_0_is_idle,
.wait_for_idle = gmc_v7_0_wait_for_idle,
.soft_reset = gmc_v7_0_soft_reset,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
};
static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
.set_prt = gmc_v7_0_set_prt,
.get_vm_pde = gmc_v7_0_get_vm_pde,
.get_vm_pte = gmc_v7_0_get_vm_pte,
.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
.set = gmc_v7_0_vm_fault_interrupt_state,
.process = gmc_v7_0_process_interrupt,
};
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}
const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7,
.minor = 0,
.rev = 0,
.funcs = &gmc_v7_0_ip_funcs,
};
const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 7,
.minor = 4,
.rev = 0,
.funcs = &gmc_v7_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
#include "vcn_v2_5.h"
#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define VCN_VID_SOC_ADDRESS_2_0 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48200
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET 0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d
#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c
#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
};
/**
* vcn_v2_5_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
* Load microcode from filesystem
*/
static int vcn_v2_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
} else {
u32 harvest;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
}
if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
AMDGPU_VCN_HARVEST_VCN1))
/* both instances are harvested, disable the block */
return -ENOENT;
adev->vcn.num_enc_rings = 2;
}
vcn_v2_5_set_dec_ring_funcs(adev);
vcn_v2_5_set_enc_ring_funcs(adev);
vcn_v2_5_set_irq_funcs(adev);
vcn_v2_5_set_ras_funcs(adev);
return amdgpu_vcn_early_init(adev);
}
/**
* vcn_v2_5_sw_init - sw init for VCN block
*
* @handle: amdgpu_device pointer
*
 * Load firmware and do sw initialization
*/
static int vcn_v2_5_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
if (adev->vcn.harvest_config & (1 << j))
continue;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
if (r)
return r;
/* VCN ENC TRAP */
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
if (r)
return r;
}
/* VCN POISON TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
if (r)
return r;
}
r = amdgpu_vcn_sw_init(adev);
if (r)
return r;
amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
return r;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
volatile struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << j))
continue;
adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
ring->use_doorbell = true;
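		/* doorbell layout: 2 doorbells per instance under SR-IOV,
		 * a stride of 8 per instance on bare metal
		 */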
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
ring->vm_hub = AMDGPU_MMHUB1(0);
else
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_dec_%d", j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
ring = &adev->vcn.inst[j].ring_enc[i];
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
ring->vm_hub = AMDGPU_MMHUB1(0);
else
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->vcn.inst[j].irq, 0,
hw_prio, NULL);
if (r)
return r;
}
fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
}
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
r = amdgpu_vcn_ras_sw_init(adev);
if (r)
return r;
return 0;
}
/**
* vcn_v2_5_sw_fini - sw fini for VCN block
*
* @handle: amdgpu_device pointer
*
* VCN suspend and free up sw allocation
*/
static int vcn_v2_5_sw_fini(void *handle)
{
int i, r, idx;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
}
drm_dev_exit(idx);
}
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
r = amdgpu_vcn_sw_fini(adev);
return r;
}
/**
* vcn_v2_5_hw_init - start and test VCN block
*
* @handle: amdgpu_device pointer
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
static int vcn_v2_5_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int i, j, r = 0;
if (amdgpu_sriov_vf(adev))
r = vcn_v2_5_sriov_start(adev);
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.inst[j].ring_enc[0].sched.ready = true;
adev->vcn.inst[j].ring_enc[1].sched.ready = false;
adev->vcn.inst[j].ring_enc[2].sched.ready = false;
adev->vcn.inst[j].ring_dec.sched.ready = true;
} else {
ring = &adev->vcn.inst[j].ring_dec;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, j);
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst[j].ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
}
}
}
done:
if (!r)
DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode");
return r;
}
/**
* vcn_v2_5_hw_fini - stop the hardware block
*
* @handle: amdgpu_device pointer
*
 * Stop the VCN block, mark rings as not ready anymore
*/
static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
cancel_delayed_work_sync(&adev->vcn.idle_work);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
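		/* gate the block if DPG is in use or the instance is still powered up */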
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
}
return 0;
}
/**
* vcn_v2_5_suspend - suspend VCN block
*
* @handle: amdgpu_device pointer
*
* HW fini and suspend VCN block
*/
static int vcn_v2_5_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = vcn_v2_5_hw_fini(adev);
if (r)
return r;
r = amdgpu_vcn_suspend(adev);
return r;
}
/**
* vcn_v2_5_resume - resume VCN block
*
* @handle: amdgpu_device pointer
*
* Resume firmware and hw init VCN block
*/
static int vcn_v2_5_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_vcn_resume(adev);
if (r)
return r;
r = vcn_v2_5_hw_init(adev);
return r;
}
/**
* vcn_v2_5_mc_resume - memory controller programming
*
* @adev: amdgpu_device pointer
*
 * Let the VCN memory controller know its offsets
*/
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
/* cache window 1: stack */
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
/* cache window 2: context */
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
/* non-cache window */
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
}
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
offset = size;
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
* vcn_v2_5_disable_clock_gating - disable VCN clock gating
*
* @adev: amdgpu_device pointer
*
* Disable clock gating for VCN block
*/
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
/* UVD disable CGC */
data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
data &= ~(UVD_CGC_GATE__SYS_MASK
| UVD_CGC_GATE__UDEC_MASK
| UVD_CGC_GATE__MPEG2_MASK
| UVD_CGC_GATE__REGS_MASK
| UVD_CGC_GATE__RBC_MASK
| UVD_CGC_GATE__LMI_MC_MASK
| UVD_CGC_GATE__LMI_UMC_MASK
| UVD_CGC_GATE__IDCT_MASK
| UVD_CGC_GATE__MPRD_MASK
| UVD_CGC_GATE__MPC_MASK
| UVD_CGC_GATE__LBSI_MASK
| UVD_CGC_GATE__LRBBM_MASK
| UVD_CGC_GATE__UDEC_RE_MASK
| UVD_CGC_GATE__UDEC_CM_MASK
| UVD_CGC_GATE__UDEC_IT_MASK
| UVD_CGC_GATE__UDEC_DB_MASK
| UVD_CGC_GATE__UDEC_MP_MASK
| UVD_CGC_GATE__WCB_MASK
| UVD_CGC_GATE__VCPU_MASK
| UVD_CGC_GATE__MMSCH_MASK);
WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);
SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);
data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
| UVD_CGC_CTRL__SYS_MODE_MASK
| UVD_CGC_CTRL__UDEC_MODE_MASK
| UVD_CGC_CTRL__MPEG2_MODE_MASK
| UVD_CGC_CTRL__REGS_MODE_MASK
| UVD_CGC_CTRL__RBC_MODE_MASK
| UVD_CGC_CTRL__LMI_MC_MODE_MASK
| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
| UVD_CGC_CTRL__IDCT_MODE_MASK
| UVD_CGC_CTRL__MPRD_MODE_MASK
| UVD_CGC_CTRL__MPC_MODE_MASK
| UVD_CGC_CTRL__LBSI_MODE_MASK
| UVD_CGC_CTRL__LRBBM_MODE_MASK
| UVD_CGC_CTRL__WCB_MODE_MASK
| UVD_CGC_CTRL__VCPU_MODE_MASK
| UVD_CGC_CTRL__MMSCH_MODE_MASK);
WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
/* turn on */
data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
data |= (UVD_SUVD_CGC_GATE__SRE_MASK
| UVD_SUVD_CGC_GATE__SIT_MASK
| UVD_SUVD_CGC_GATE__SMP_MASK
| UVD_SUVD_CGC_GATE__SCM_MASK
| UVD_SUVD_CGC_GATE__SDB_MASK
| UVD_SUVD_CGC_GATE__SRE_H264_MASK
| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
| UVD_SUVD_CGC_GATE__SIT_H264_MASK
| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
| UVD_SUVD_CGC_GATE__SCM_H264_MASK
| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
| UVD_SUVD_CGC_GATE__SDB_H264_MASK
| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
| UVD_SUVD_CGC_GATE__SCLR_MASK
| UVD_SUVD_CGC_GATE__UVD_SC_MASK
| UVD_SUVD_CGC_GATE__ENT_MASK
| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
| UVD_SUVD_CGC_GATE__SITE_MASK
| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);
data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
}
static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
uint32_t reg_data = 0;
/* enable sw clock gating control */
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__MMSCH_MODE_MASK);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
* vcn_v2_5_enable_clock_gating - enable VCN clock gating
*
* @adev: amdgpu_device pointer
*
* Enable clock gating for VCN block
*/
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
/* enable UVD CGC */
data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
| UVD_CGC_CTRL__SYS_MODE_MASK
| UVD_CGC_CTRL__UDEC_MODE_MASK
| UVD_CGC_CTRL__MPEG2_MODE_MASK
| UVD_CGC_CTRL__REGS_MODE_MASK
| UVD_CGC_CTRL__RBC_MODE_MASK
| UVD_CGC_CTRL__LMI_MC_MODE_MASK
| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
| UVD_CGC_CTRL__IDCT_MODE_MASK
| UVD_CGC_CTRL__MPRD_MODE_MASK
| UVD_CGC_CTRL__MPC_MODE_MASK
| UVD_CGC_CTRL__LBSI_MODE_MASK
| UVD_CGC_CTRL__LRBBM_MODE_MASK
| UVD_CGC_CTRL__WCB_MODE_MASK
| UVD_CGC_CTRL__VCPU_MODE_MASK);
WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);
data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
}
}
static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
bool indirect)
{
uint32_t tmp;
if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(2, 6, 0))
return;
tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
WREG32_SOC15_DPG_MODE(inst_idx,
SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
tmp, 0, indirect);
tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
WREG32_SOC15_DPG_MODE(inst_idx,
SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
tmp, 0, indirect);
tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
WREG32_SOC15_DPG_MODE(inst_idx,
SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
tmp, 0, indirect);
}
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* enable dynamic power gating mode */
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
if (indirect)
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
/* enable clock gating */
vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
/* enable VCPU clock */
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
	/* disable master interrupt */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__REQ_MODE_MASK |
UVD_LMI_CTRL__CRC_RESET_MASK |
UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* enable LMI MC and UMC channels */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
vcn_v2_6_enable_ras(adev, inst_idx, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
/* Stall DPG before WPTR/RPTR reset */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* set the write pointer delay */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
return 0;
}
static int vcn_v2_5_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int i, j, k, r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, true);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
continue;
}
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* set uvd status busy */
tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
return 0;
	/* SW clock gating */
vcn_v2_5_disable_clock_gating(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
/* enable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
/* disable master interrupt */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* setup mmUVD_LMI_CTRL */
tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
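		/* clear the low byte (presumably the write-clean timer) before setting it to 0x8 below */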
tmp &= ~0xff;
WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
/* setup mmUVD_MPC_CNTL */
tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
/* setup UVD_MPC_SET_MUXA0 */
WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
/* setup UVD_MPC_SET_MUXB0 */
WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
/* setup mmUVD_MPC_SET_MUX */
WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
}
vcn_v2_5_mc_resume(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
if (adev->vcn.harvest_config & (1 << i))
continue;
/* VCN global tiling registers */
WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable LMI MC and UMC channels */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* unblock VCPU register access */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
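		/* poll UVD_STATUS for the VCPU ready bit, resetting the VCPU
		 * between attempts, up to 10 times
		 */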
for (k = 0; k < 10; ++k) {
uint32_t status;
for (j = 0; j < 100; ++j) {
status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
if (status & 2)
break;
if (amdgpu_emu_mode == 1)
msleep(500);
else
mdelay(10);
}
r = 0;
if (status & 2)
break;
DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
r = -1;
}
if (r) {
DRM_ERROR("VCN decode not responding, giving up!!!\n");
return r;
}
/* enable master interrupt */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* clear the busy bit of VCN_STATUS */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
ring = &adev->vcn.inst[i].ring_dec;
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* program the RB_BASE for ring buffer */
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[i].ring_enc[0];
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[i].ring_enc[1];
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
}
return 0;
}
static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
struct amdgpu_mm_table *table)
{
uint32_t data = 0, loop = 0, size = 0;
uint64_t addr = table->gpu_addr;
struct mmsch_v1_1_init_header *header = NULL;
header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
size = header->total_size;
/*
* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
* memory descriptor location
*/
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
/* 2, update vmid of descriptor */
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
/* use domain0 for MM scheduler */
data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
/* 3, notify mmsch about the size of this descriptor */
WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
/*
* 5, kick off the initialization and wait until
* VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
*/
WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
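	/* poll up to 10 times, 100us apart, for the MMSCH ack bits */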
loop = 10;
while ((data & 0x10000002) != 0x10000002) {
udelay(100);
data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop--;
if (!loop)
break;
}
if (!loop) {
dev_err(adev->dev,
"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
data);
return -EBUSY;
}
return 0;
}
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint32_t offset, size, tmp, i, rb_bufsz;
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
struct mmsch_v1_0_cmd_end end = { { 0 } };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
end.cmd_header.command_type = MMSCH_COMMAND__END;
header->version = MMSCH_VERSION;
header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
init_table += header->total_size;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
header->eng[i].table_offset = header->total_size;
header->eng[i].init_status = 0;
header->eng[i].table_size = 0;
table_size = 0;
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
size);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
AMDGPU_VCN_STACK_SIZE);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
ring = &adev->vcn.inst[i].ring_enc[0];
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
ring->ring_size / 4);
ring = &adev->vcn.inst[i].ring_dec;
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
upper_32_bits(ring->gpu_addr));
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;
/* update header: record this engine's table size */
header->eng[i].table_size = table_size;
header->total_size += table_size;
}
return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
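/*
* Illustrative sketch (not part of the driver): the dword bookkeeping used
* when the SR-IOV start path above builds the MMSCH init table. The header
* records, per engine, where that engine's command stream starts
* (table_offset) and how many dwords it occupies (table_size); total_size is
* the running length of the whole table in dwords. The struct and the
* "example_" helpers below are simplified stand-ins, not the real
* mmsch_v1_1_init_header layout.
*/
struct example_mmsch_header {
	unsigned int total_size;                 /* dwords, includes this header */
	struct {
		unsigned int table_offset;       /* dwords from start of table */
		unsigned int table_size;         /* dwords, includes end packet */
	} eng[2];
};

static unsigned int *example_begin_engine_table(struct example_mmsch_header *hdr,
						unsigned int *table_base, int eng)
{
	/* the engine's commands start right after everything emitted so far */
	hdr->eng[eng].table_offset = hdr->total_size;
	hdr->eng[eng].table_size = 0;
	return table_base + hdr->total_size;
}

static void example_end_engine_table(struct example_mmsch_header *hdr,
				     int eng, unsigned int cmd_dwords)
{
	/* cmd_dwords counts every command appended, end packet included */
	hdr->eng[eng].table_size = cmd_dwords;
	hdr->total_size += cmd_dwords;
}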
static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
uint32_t tmp;
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* wait for read ptr to be equal to write ptr */
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
return 0;
}
static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
uint32_t tmp;
int i, r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
r = vcn_v2_5_stop_dpg_mode(adev, i);
continue;
}
/* wait for vcn idle */
r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
if (r)
return r;
tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
UVD_LMI_STATUS__READ_CLEAN_MASK |
UVD_LMI_STATUS__WRITE_CLEAN_MASK |
UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
if (r)
return r;
/* block LMI UMC channel */
tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
if (r)
return r;
/* block VCPU register access */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
/* reset VCPU */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
/* disable VCPU clock */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~(UVD_VCPU_CNTL__CLK_EN_MASK));
/* clear status */
WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
vcn_v2_5_enable_clock_gating(adev);
/* enable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_uvd(adev, false);
return 0;
}
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state)
{
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code = 0;
/* pause/unpause if state is changed */
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
/* wait for ACK */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
/* Stall DPG before WPTR/RPTR reset */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Restore the encode rings */
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
ring->wptr = 0;
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
ring->wptr = 0;
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
} else {
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
}
/**
* vcn_v2_5_dec_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
* vcn_v2_5_dec_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
* vcn_v2_5_dec_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
.secure_submission_supported = true,
.get_rptr = vcn_v2_5_dec_ring_get_rptr,
.get_wptr = vcn_v2_5_dec_ring_get_wptr,
.set_wptr = vcn_v2_5_dec_ring_set_wptr,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
6,
.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
.emit_ib = vcn_v2_0_dec_ring_emit_ib,
.emit_fence = vcn_v2_0_dec_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
.test_ring = vcn_v2_0_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
.insert_nop = vcn_v2_0_dec_ring_insert_nop,
.insert_start = vcn_v2_0_dec_ring_insert_start,
.insert_end = vcn_v2_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
* vcn_v2_5_enc_ring_get_rptr - get enc read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware enc read pointer
*/
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
else
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
* vcn_v2_5_enc_ring_get_wptr - get enc write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware enc write pointer
*/
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
} else {
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
}
}
/**
* vcn_v2_5_enc_ring_set_wptr - set enc write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the enc write pointer to the hardware
*/
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
if (ring->use_doorbell) {
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
}
} else {
if (ring->use_doorbell) {
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
}
}
}
static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
.get_rptr = vcn_v2_5_enc_ring_get_rptr,
.get_wptr = vcn_v2_5_enc_ring_get_wptr,
.set_wptr = vcn_v2_5_enc_ring_set_wptr,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1, /* vcn_v2_0_enc_ring_insert_end */
.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
.emit_ib = vcn_v2_0_enc_ring_emit_ib,
.emit_fence = vcn_v2_0_enc_ring_emit_fence,
.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
.test_ring = amdgpu_vcn_enc_ring_test_ring,
.test_ib = amdgpu_vcn_enc_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = vcn_v2_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vcn_ring_begin_use,
.end_use = amdgpu_vcn_ring_end_use,
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
adev->vcn.inst[i].ring_dec.me = i;
DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
}
}
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i, j;
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
}
}
static bool vcn_v2_5_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, ret = 1;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
}
return ret;
}
static int vcn_v2_5_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, ret = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
UVD_STATUS__IDLE);
if (ret)
return ret;
}
return ret;
}
static int vcn_v2_5_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
return 0;
if (enable) {
if (!vcn_v2_5_is_idle(handle))
return -EBUSY;
vcn_v2_5_enable_clock_gating(adev);
} else {
vcn_v2_5_disable_clock_gating(adev);
}
return 0;
}
static int vcn_v2_5_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
if (amdgpu_sriov_vf(adev))
return 0;
if (state == adev->vcn.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
ret = vcn_v2_5_stop(adev);
else
ret = vcn_v2_5_start(adev);
if (!ret)
adev->vcn.cur_state = state;
return ret;
}
static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t ip_instance;
switch (entry->client_id) {
case SOC15_IH_CLIENTID_VCN:
ip_instance = 0;
break;
case SOC15_IH_CLIENTID_VCN1:
ip_instance = 1;
break;
default:
DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
return 0;
}
DRM_DEBUG("IH: VCN TRAP\n");
switch (entry->src_id) {
case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
break;
case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
break;
case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
.set = vcn_v2_5_set_interrupt_state,
.process = vcn_v2_5_process_interrupt,
};
static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
.set = vcn_v2_6_set_ras_interrupt_state,
.process = amdgpu_vcn_process_poison_irq,
};
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
}
}
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
.late_init = NULL,
.sw_init = vcn_v2_5_sw_init,
.sw_fini = vcn_v2_5_sw_fini,
.hw_init = vcn_v2_5_hw_init,
.hw_fini = vcn_v2_5_hw_fini,
.suspend = vcn_v2_5_suspend,
.resume = vcn_v2_5_resume,
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.check_soft_reset = NULL,
.pre_soft_reset = NULL,
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.name = "vcn_v2_6",
.early_init = vcn_v2_5_early_init,
.late_init = NULL,
.sw_init = vcn_v2_5_sw_init,
.sw_fini = vcn_v2_5_sw_fini,
.hw_init = vcn_v2_5_hw_init,
.hw_fini = vcn_v2_5_hw_fini,
.suspend = vcn_v2_5_suspend,
.resume = vcn_v2_5_resume,
.is_idle = vcn_v2_5_is_idle,
.wait_for_idle = vcn_v2_5_wait_for_idle,
.check_soft_reset = NULL,
.pre_soft_reset = NULL,
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_v2_5_set_powergating_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_VCN,
.major = 2,
.minor = 5,
.rev = 0,
.funcs = &vcn_v2_5_ip_funcs,
};
const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_VCN,
.major = 2,
.minor = 6,
.rev = 0,
.funcs = &vcn_v2_6_ip_funcs,
};
static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
uint32_t instance, uint32_t sub_block)
{
uint32_t poison_stat = 0, reg_value = 0;
switch (sub_block) {
case AMDGPU_VCN_V2_6_VCPU_VCODEC:
reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
break;
default:
break;
}
if (poison_stat)
dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
instance, sub_block);
return poison_stat;
}
static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
uint32_t inst, sub;
uint32_t poison_stat = 0;
for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
poison_stat +=
vcn_v2_6_query_poison_by_instance(adev, inst, sub);
return !!poison_stat;
}
const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
.query_poison_status = vcn_v2_6_query_poison_status,
};
static struct amdgpu_vcn_ras vcn_v2_6_ras = {
.ras_block = {
.hw_ops = &vcn_v2_6_ras_hw_ops,
.ras_late_init = amdgpu_vcn_ras_late_init,
},
};
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[VCN_HWIP][0]) {
case IP_VERSION(2, 6, 0):
adev->vcn.ras = &vcn_v2_6_ras;
break;
default:
break;
}
}
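/*
* Illustrative sketch (not part of the driver): checks such as the
* IP_VERSION(2, 6, 0) comparison above operate on a packed version value.
* The packing shown here (major in bits 23:16, minor in 15:8, revision in
* 7:0) is an assumption for the example; the driver's own IP_VERSION macro
* is the authoritative definition.
*/
static unsigned int example_pack_ip_version(unsigned int major,
					    unsigned int minor,
					    unsigned int rev)
{
	return (major << 16) | (minor << 8) | rev;   /* e.g. 2.6.0 -> 0x020600 */
}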
| linux-master | drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_fw_attestation.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#define FW_ATTESTATION_DB_COOKIE 0x143b6a37
#define FW_ATTESTATION_RECORD_VALID 1
#define FW_ATTESTATION_MAX_SIZE 4096
struct FW_ATT_DB_HEADER {
uint32_t AttDbVersion; /* version of the fwar feature */
uint32_t AttDbCookie; /* cookie as an extra check for corrupt data */
};
struct FW_ATT_RECORD {
uint16_t AttFwIdV1; /* Legacy FW Type field */
uint16_t AttFwIdV2; /* V2 FW ID field */
uint32_t AttFWVersion; /* FW Version */
uint16_t AttFWActiveFunctionID; /* The VF ID (only in VF Attestation Table) */
uint8_t AttSource; /* FW source indicator */
uint8_t RecordValid; /* Indicates whether the record is a valid entry */
uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
};
static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
char __user *buf,
size_t size,
loff_t *pos)
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
uint64_t records_addr = 0;
uint64_t vram_pos = 0;
struct FW_ATT_DB_HEADER fw_att_hdr = {0};
struct FW_ATT_RECORD fw_att_record = {0};
if (size < sizeof(struct FW_ATT_RECORD)) {
DRM_WARN("FW attestation input buffer not enough memory");
return -EINVAL;
}
if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) {
DRM_WARN("FW attestation out of bounds");
return 0;
}
if (psp_get_fw_attestation_records_addr(&adev->psp, &records_addr)) {
DRM_WARN("Failed to get FW attestation record address");
return -EINVAL;
}
vram_pos = records_addr - adev->gmc.vram_start;
if (*pos == 0) {
amdgpu_device_vram_access(adev,
vram_pos,
(uint32_t *)&fw_att_hdr,
sizeof(struct FW_ATT_DB_HEADER),
false);
if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) {
DRM_WARN("Invalid FW attestation cookie");
return -EINVAL;
}
DRM_INFO("FW attestation version = 0x%X", fw_att_hdr.AttDbVersion);
}
amdgpu_device_vram_access(adev,
vram_pos + sizeof(struct FW_ATT_DB_HEADER) + *pos,
(uint32_t *)&fw_att_record,
sizeof(struct FW_ATT_RECORD),
false);
if (fw_att_record.RecordValid != FW_ATTESTATION_RECORD_VALID)
return 0;
if (copy_to_user(buf, (void *)&fw_att_record, sizeof(struct FW_ATT_RECORD)))
return -EINVAL;
*pos += sizeof(struct FW_ATT_RECORD);
return sizeof(struct FW_ATT_RECORD);
}
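/*
* Illustrative userspace sketch (not part of the driver): reading the
* attestation records exposed by the debugfs file whose read handler is
* shown above. Each successful read() returns exactly one record and a
* return of 0 means the table is exhausted. The mirrored record layout and
* the debugfs path ("/sys/kernel/debug/dri/0/...") are assumptions made for
* this example.
*/
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

struct example_fw_att_record {
	uint16_t fw_id_v1;
	uint16_t fw_id_v2;
	uint32_t fw_version;
	uint16_t fw_active_function_id;
	uint8_t  source;
	uint8_t  record_valid;
	uint32_t fw_ta_id;
} __attribute__((packed));

int example_dump_fw_attestation(void)
{
	struct example_fw_att_record rec;
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_fw_attestation", O_RDONLY);

	if (fd < 0)
		return -1;

	/* one record per read(); 0 means no further valid records */
	while ((n = read(fd, &rec, sizeof(rec))) == (ssize_t)sizeof(rec))
		printf("fw_id_v2=0x%04x version=0x%08x\n",
		       rec.fw_id_v2, rec.fw_version);

	close(fd);
	return n < 0 ? -1 : 0;
}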
static const struct file_operations amdgpu_fw_attestation_debugfs_ops = {
.owner = THIS_MODULE,
.read = amdgpu_fw_attestation_debugfs_read,
.write = NULL,
.llseek = default_llseek
};
static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return 0;
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
return 0;
}
void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev)
{
if (!amdgpu_is_fw_attestation_supported(adev))
return;
debugfs_create_file("amdgpu_fw_attestation",
0400,
adev_to_drm(adev)->primary->debugfs_root,
adev,
&amdgpu_fw_attestation_debugfs_ops);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "smuio_v13_0.h"
#include "smuio/smuio_13_0_2_offset.h"
#include "smuio/smuio_13_0_2_sh_mask.h"
#define SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK 0x00000001L
static u32 smuio_v13_0_get_rom_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX);
}
static u32 smuio_v13_0_get_rom_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA);
}
static void smuio_v13_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
{
u32 def, data;
/* enable/disable ROM CG is not supported on APU */
if (adev->flags & AMD_IS_APU)
return;
def = data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
if (def != data)
WREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0, data);
}
static void smuio_v13_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *flags)
{
u32 data;
/* CGTT_ROM_CLK_CTRL0 is not available for APU */
if (adev->flags & AMD_IS_APU)
return;
data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}
/**
* smuio_v13_0_get_die_id - query die id from FCH.
*
* @adev: amdgpu device pointer
*
* Returns die id
*/
static u32 smuio_v13_0_get_die_id(struct amdgpu_device *adev)
{
u32 data, die_id;
data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID);
return die_id;
}
/**
* smuio_v13_0_get_socket_id - query socket id from FCH
*
* @adev: amdgpu device pointer
*
* Returns socket id
*/
static u32 smuio_v13_0_get_socket_id(struct amdgpu_device *adev)
{
u32 data, socket_id;
data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
socket_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, SOCKET_ID);
return socket_id;
}
/**
* smuio_v13_0_is_host_gpu_xgmi_supported - detect xgmi interface between cpu and gpu/s.
*
* @adev: amdgpu device pointer
*
* Returns true on success or false otherwise.
*/
static bool smuio_v13_0_is_host_gpu_xgmi_supported(struct amdgpu_device *adev)
{
u32 data;
data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, TOPOLOGY_ID);
/* data[4:0]
* bit 0 == 0 host-gpu interface is PCIE
* bit 0 == 1 host-gpu interface is Alternate Protocol
* for AMD, this is XGMI
*/
data &= SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK;
return data ? true : false;
}
const struct amdgpu_smuio_funcs smuio_v13_0_funcs = {
.get_rom_index_offset = smuio_v13_0_get_rom_index_offset,
.get_rom_data_offset = smuio_v13_0_get_rom_data_offset,
.get_die_id = smuio_v13_0_get_die_id,
.get_socket_id = smuio_v13_0_get_socket_id,
.is_host_gpu_xgmi_supported = smuio_v13_0_is_host_gpu_xgmi_supported,
.update_rom_clock_gating = smuio_v13_0_update_rom_clock_gating,
.get_clock_gating_state = smuio_v13_0_get_clock_gating_state,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
static const char *hw_id_names[HW_ID_MAX] = {
[MP1_HWID] = "MP1",
[MP2_HWID] = "MP2",
[THM_HWID] = "THM",
[SMUIO_HWID] = "SMUIO",
[FUSE_HWID] = "FUSE",
[CLKA_HWID] = "CLKA",
[PWR_HWID] = "PWR",
[GC_HWID] = "GC",
[UVD_HWID] = "UVD",
[AUDIO_AZ_HWID] = "AUDIO_AZ",
[ACP_HWID] = "ACP",
[DCI_HWID] = "DCI",
[DMU_HWID] = "DMU",
[DCO_HWID] = "DCO",
[DIO_HWID] = "DIO",
[XDMA_HWID] = "XDMA",
[DCEAZ_HWID] = "DCEAZ",
[DAZ_HWID] = "DAZ",
[SDPMUX_HWID] = "SDPMUX",
[NTB_HWID] = "NTB",
[IOHC_HWID] = "IOHC",
[L2IMU_HWID] = "L2IMU",
[VCE_HWID] = "VCE",
[MMHUB_HWID] = "MMHUB",
[ATHUB_HWID] = "ATHUB",
[DBGU_NBIO_HWID] = "DBGU_NBIO",
[DFX_HWID] = "DFX",
[DBGU0_HWID] = "DBGU0",
[DBGU1_HWID] = "DBGU1",
[OSSSYS_HWID] = "OSSSYS",
[HDP_HWID] = "HDP",
[SDMA0_HWID] = "SDMA0",
[SDMA1_HWID] = "SDMA1",
[SDMA2_HWID] = "SDMA2",
[SDMA3_HWID] = "SDMA3",
[LSDMA_HWID] = "LSDMA",
[ISP_HWID] = "ISP",
[DBGU_IO_HWID] = "DBGU_IO",
[DF_HWID] = "DF",
[CLKB_HWID] = "CLKB",
[FCH_HWID] = "FCH",
[DFX_DAP_HWID] = "DFX_DAP",
[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
[L1IMU3_HWID] = "L1IMU3",
[L1IMU4_HWID] = "L1IMU4",
[L1IMU5_HWID] = "L1IMU5",
[L1IMU6_HWID] = "L1IMU6",
[L1IMU7_HWID] = "L1IMU7",
[L1IMU8_HWID] = "L1IMU8",
[L1IMU9_HWID] = "L1IMU9",
[L1IMU10_HWID] = "L1IMU10",
[L1IMU11_HWID] = "L1IMU11",
[L1IMU12_HWID] = "L1IMU12",
[L1IMU13_HWID] = "L1IMU13",
[L1IMU14_HWID] = "L1IMU14",
[L1IMU15_HWID] = "L1IMU15",
[WAFLC_HWID] = "WAFLC",
[FCH_USB_PD_HWID] = "FCH_USB_PD",
[PCIE_HWID] = "PCIE",
[PCS_HWID] = "PCS",
[DDCL_HWID] = "DDCL",
[SST_HWID] = "SST",
[IOAGR_HWID] = "IOAGR",
[NBIF_HWID] = "NBIF",
[IOAPIC_HWID] = "IOAPIC",
[SYSTEMHUB_HWID] = "SYSTEMHUB",
[NTBCCP_HWID] = "NTBCCP",
[UMC_HWID] = "UMC",
[SATA_HWID] = "SATA",
[USB_HWID] = "USB",
[CCXSEC_HWID] = "CCXSEC",
[XGMI_HWID] = "XGMI",
[XGBE_HWID] = "XGBE",
[MP0_HWID] = "MP0",
};
static int hw_id_map[MAX_HWIP] = {
[GC_HWIP] = GC_HWID,
[HDP_HWIP] = HDP_HWID,
[SDMA0_HWIP] = SDMA0_HWID,
[SDMA1_HWIP] = SDMA1_HWID,
[SDMA2_HWIP] = SDMA2_HWID,
[SDMA3_HWIP] = SDMA3_HWID,
[LSDMA_HWIP] = LSDMA_HWID,
[MMHUB_HWIP] = MMHUB_HWID,
[ATHUB_HWIP] = ATHUB_HWID,
[NBIO_HWIP] = NBIF_HWID,
[MP0_HWIP] = MP0_HWID,
[MP1_HWIP] = MP1_HWID,
[UVD_HWIP] = UVD_HWID,
[VCE_HWIP] = VCE_HWID,
[DF_HWIP] = DF_HWID,
[DCE_HWIP] = DMU_HWID,
[OSSSYS_HWIP] = OSSSYS_HWID,
[SMUIO_HWIP] = SMUIO_HWID,
[PWR_HWIP] = PWR_HWID,
[NBIF_HWIP] = NBIF_HWID,
[THM_HWIP] = THM_HWID,
[CLK_HWIP] = CLKA_HWID,
[UMC_HWIP] = UMC_HWID,
[XGMI_HWIP] = XGMI_HWID,
[DCI_HWIP] = DCI_HWID,
[PCIE_HWIP] = PCIE_HWID,
};
static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
u64 tmr_offset, tmr_size, pos;
void *discv_regn;
int ret;
ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
if (ret)
return ret;
pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
/* This region is read-only and reserved from system use */
discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
if (discv_regn) {
memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
memunmap(discv_regn);
return 0;
}
return -ENOENT;
}
static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
int ret = 0;
if (vram_size) {
uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
adev->mman.discovery_tmr_size, false);
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
return ret;
}
static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
const struct firmware *fw;
const char *fw_name;
int r;
switch (amdgpu_discovery) {
case 2:
fw_name = FIRMWARE_IP_DISCOVERY;
break;
default:
dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
return -EINVAL;
}
r = request_firmware(&fw, fw_name, adev->dev);
if (r) {
dev_err(adev->dev, "can't load firmware \"%s\"\n",
fw_name);
return r;
}
memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
release_firmware(fw);
return 0;
}
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
uint16_t checksum = 0;
int i;
for (i = 0; i < size; i++)
checksum += data[i];
return checksum;
}
static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
uint16_t expected)
{
return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
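/*
* Illustrative sketch (not part of the driver): the discovery checksum used
* above is a plain byte-wise sum truncated to 16 bits, compared against the
* value stored in the table header. A standalone verifier for a blob already
* in memory could look like this; the "example_" name is made up for the
* illustration.
*/
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

static bool example_verify_discovery_checksum(const uint8_t *data, size_t size,
					      uint16_t expected)
{
	uint16_t sum = 0;
	size_t i;

	/* unsigned 16-bit arithmetic wraps, matching the kernel helper */
	for (i = 0; i < size; i++)
		sum += data[i];

	return sum == expected;
}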
static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
struct binary_header *bhdr;
bhdr = (struct binary_header *)binary;
return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}
static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
/*
* So far, apply this quirk only to those Navy Flounder boards that
* have a bad VCN config entry in their harvest table.
*/
if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
(adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
switch (adev->pdev->revision) {
case 0xC1:
case 0xC2:
case 0xC3:
case 0xC5:
case 0xC7:
case 0xCF:
case 0xDF:
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
break;
default:
break;
}
}
}
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
uint16_t offset;
uint16_t size;
uint16_t checksum;
int r;
adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
if (!adev->mman.discovery_bin)
return -ENOMEM;
/* Read from file if it is the preferred option */
if (amdgpu_discovery == 2) {
dev_info(adev->dev, "use ip discovery information from file");
r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
if (r) {
dev_err(adev->dev, "failed to read ip discovery binary from file\n");
r = -EINVAL;
goto out;
}
} else {
r = amdgpu_discovery_read_binary_from_mem(
adev, adev->mman.discovery_bin);
if (r)
goto out;
}
/* check the ip discovery binary signature */
if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
dev_err(adev->dev,
"get invalid ip discovery binary signature\n");
r = -EINVAL;
goto out;
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = le16_to_cpu(bhdr->binary_size) - offset;
checksum = le16_to_cpu(bhdr->binary_checksum);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
size, checksum)) {
dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
}
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
if (offset) {
struct ip_discovery_header *ihdr =
(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le16_to_cpu(ihdr->size), checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
if (offset) {
struct gpu_info_header *ghdr =
(struct gpu_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery gc table id\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(ghdr->size), checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[HARVEST_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
if (offset) {
struct harvest_info_header *hhdr =
(struct harvest_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
sizeof(struct harvest_table), checksum)) {
dev_err(adev->dev, "invalid harvest data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[VCN_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
if (offset) {
struct vcn_info_header *vhdr =
(struct vcn_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery vcn table id\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(vhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid vcn data table checksum\n");
r = -EINVAL;
goto out;
}
}
info = &bhdr->table_list[MALL_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
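/*
* Note: the "0 &&" below short-circuits the condition, so the MALL info
* checksum verification in the following block is effectively disabled and
* the block is dead code as written.
*/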
if (0 && offset) {
struct mall_info_header *mhdr =
(struct mall_info_header *)(adev->mman.discovery_bin + offset);
if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery mall table id\n");
r = -EINVAL;
goto out;
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
le32_to_cpu(mhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid mall data table checksum\n");
r = -EINVAL;
goto out;
}
}
return 0;
out:
kfree(adev->mman.discovery_bin);
adev->mman.discovery_bin = NULL;
return r;
}
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
amdgpu_discovery_sysfs_fini(adev);
kfree(adev->mman.discovery_bin);
adev->mman.discovery_bin = NULL;
}
static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
{
if (ip->instance_number >= HWIP_MAX_INSTANCE) {
DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
ip->instance_number);
return -EINVAL;
}
if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
le16_to_cpu(ip->hw_id));
return -EINVAL;
}
return 0;
}
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
struct ip_v4 *ip;
uint16_t die_offset, ip_offset, num_dies, num_ips;
int i, j;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
/* scan harvest bit of all IP data structures */
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
if (amdgpu_discovery_validate_ip(ip))
goto next_ip;
if (le16_to_cpu(ip->variant) == 1) {
switch (le16_to_cpu(ip->hw_id)) {
case VCN_HWID:
(*vcn_harvest_count)++;
if (ip->instance_number == 0) {
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
adev->vcn.inst_mask &=
~AMDGPU_VCN_HARVEST_VCN0;
adev->jpeg.inst_mask &=
~AMDGPU_VCN_HARVEST_VCN0;
} else {
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
adev->vcn.inst_mask &=
~AMDGPU_VCN_HARVEST_VCN1;
adev->jpeg.inst_mask &=
~AMDGPU_VCN_HARVEST_VCN1;
}
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
break;
default:
break;
}
}
next_ip:
if (ihdr->base_addr_64_bit)
ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
else
ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
}
static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count,
uint32_t *umc_harvest_count)
{
struct binary_header *bhdr;
struct harvest_table *harvest_info;
u16 offset;
int i;
uint32_t umc_harvest_config = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
if (!offset) {
dev_err(adev->dev, "invalid harvest table offset\n");
return;
}
harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
for (i = 0; i < 32; i++) {
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
break;
switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
(*vcn_harvest_count)++;
adev->vcn.harvest_config |=
(1 << harvest_info->list[i].number_instance);
adev->jpeg.harvest_config |=
(1 << harvest_info->list[i].number_instance);
adev->vcn.inst_mask &=
~(1U << harvest_info->list[i].number_instance);
adev->jpeg.inst_mask &=
~(1U << harvest_info->list[i].number_instance);
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
break;
case UMC_HWID:
umc_harvest_config |=
1 << (le16_to_cpu(harvest_info->list[i].number_instance));
(*umc_harvest_count)++;
break;
case GC_HWID:
adev->gfx.xcc_mask &=
~(1U << harvest_info->list[i].number_instance);
break;
case SDMA0_HWID:
adev->sdma.sdma_mask &=
~(1U << harvest_info->list[i].number_instance);
break;
default:
break;
}
}
adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
~umc_harvest_config;
}
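/*
* Illustrative sketch (not part of the driver): how the UMC active mask
* computed at the end of the function above works out numerically. With
* node_inst_num = 4 the full mask is 0b1111; if the harvest table reports
* instance 2 as harvested, harvest_config is 0b0100 and the active mask
* becomes 0b1011. The values are made up for the example.
*/
static unsigned int example_umc_active_mask(unsigned int node_inst_num,
					    unsigned int harvest_config)
{
	unsigned int full_mask = (1U << node_inst_num) - 1;  /* e.g. 0b1111 */

	return full_mask & ~harvest_config;                  /* e.g. 0b1011 */
}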
/* ================================================== */
struct ip_hw_instance {
struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
int hw_id;
u8 num_instance;
u8 major, minor, revision;
u8 harvest;
int num_base_addresses;
u32 base_addr[];
};
struct ip_hw_id {
struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
int hw_id;
};
struct ip_die_entry {
struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
u16 num_ips;
};
/* -------------------------------------------------- */
struct ip_hw_instance_attr {
struct attribute attr;
ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};
static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}
static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}
static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}
static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}
static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}
static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}
static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}
static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
ssize_t res, at;
int ii;
for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
/* Keep at + size <= PAGE_SIZE: each "0x%08X\n" entry is 11
* characters plus a terminating NUL, hence the 12-byte check below.
*/
if (at + 12 > PAGE_SIZE)
break;
res = sysfs_emit_at(buf, at, "0x%08X\n",
ip_hw_instance->base_addr[ii]);
if (res <= 0)
break;
at += res;
}
return res < 0 ? res : at;
}
static struct ip_hw_instance_attr ip_hw_attr[] = {
__ATTR_RO(hw_id),
__ATTR_RO(num_instance),
__ATTR_RO(major),
__ATTR_RO(minor),
__ATTR_RO(revision),
__ATTR_RO(harvest),
__ATTR_RO(num_base_addresses),
__ATTR_RO(base_addr),
};
static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);
#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
if (!ip_hw_attr->show)
return -EIO;
return ip_hw_attr->show(ip_hw_instance, buf);
}
static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
.show = ip_hw_instance_attr_show,
};
static void ip_hw_instance_release(struct kobject *kobj)
{
struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
kfree(ip_hw_instance);
}
static const struct kobj_type ip_hw_instance_ktype = {
.release = ip_hw_instance_release,
.sysfs_ops = &ip_hw_instance_sysfs_ops,
.default_groups = ip_hw_instance_groups,
};
/* -------------------------------------------------- */
#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
static void ip_hw_id_release(struct kobject *kobj)
{
struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
if (!list_empty(&ip_hw_id->hw_id_kset.list))
DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
kfree(ip_hw_id);
}
static const struct kobj_type ip_hw_id_ktype = {
.release = ip_hw_id_release,
.sysfs_ops = &kobj_sysfs_ops,
};
/* -------------------------------------------------- */
static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);
struct ip_die_entry_attribute {
struct attribute attr;
ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};
#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}
/* If there are more ip_die_entry attrs, other than the number of IPs,
* we can make this into an array of attrs, and then initialize
* ip_die_entry_attrs in a loop.
*/
static struct ip_die_entry_attribute num_ips_attr =
__ATTR_RO(num_ips);
static struct attribute *ip_die_entry_attrs[] = {
&num_ips_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
if (!ip_die_entry_attr->show)
return -EIO;
return ip_die_entry_attr->show(ip_die_entry, buf);
}
static void ip_die_entry_release(struct kobject *kobj)
{
struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
if (!list_empty(&ip_die_entry->ip_kset.list))
DRM_ERROR("ip_die_entry->ip_kset is not empty");
kfree(ip_die_entry);
}
static const struct sysfs_ops ip_die_entry_sysfs_ops = {
.show = ip_die_entry_attr_show,
};
static const struct kobj_type ip_die_entry_ktype = {
.release = ip_die_entry_release,
.sysfs_ops = &ip_die_entry_sysfs_ops,
.default_groups = ip_die_entry_groups,
};
static const struct kobj_type die_kobj_ktype = {
.release = die_kobj_release,
.sysfs_ops = &kobj_sysfs_ops,
};
static const struct kobj_type ip_discovery_ktype = {
.release = ip_disc_release,
.sysfs_ops = &kobj_sysfs_ops,
};
struct ip_discovery_top {
struct kobject kobj; /* ip_discovery/ */
struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
struct amdgpu_device *adev;
};
static void die_kobj_release(struct kobject *kobj)
{
struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
struct ip_discovery_top,
die_kset);
if (!list_empty(&ip_top->die_kset.list))
DRM_ERROR("ip_top->die_kset is not empty");
}
static void ip_disc_release(struct kobject *kobj)
{
struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
kobj);
struct amdgpu_device *adev = ip_top->adev;
adev->ip_top = NULL;
kfree(ip_top);
}
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
uint16_t hw_id, uint8_t inst)
{
uint8_t harvest = 0;
/* Until a uniform scheme is figured out, derive the harvest mask from the hw_id */
switch (hw_id) {
case VCN_HWID:
harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
break;
case DMU_HWID:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
harvest = 0x1;
break;
case UMC_HWID:
/* TODO: UMC harvesting needs a different parsing scheme; ignore it for now. */
break;
case GC_HWID:
harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
break;
case SDMA0_HWID:
harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
break;
default:
break;
}
return harvest;
}
static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_die_entry *ip_die_entry,
const size_t _ip_offset, const int num_ips,
bool reg_base_64)
{
int ii, jj, kk, res;
DRM_DEBUG("num_ips:%d", num_ips);
/* Find all IPs of a given HW ID, and add their instance to
* #die/#hw_id/#instance/<attributes>
*/
for (ii = 0; ii < HW_ID_MAX; ii++) {
struct ip_hw_id *ip_hw_id = NULL;
size_t ip_offset = _ip_offset;
for (jj = 0; jj < num_ips; jj++) {
struct ip_v4 *ip;
struct ip_hw_instance *ip_hw_instance;
ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
if (amdgpu_discovery_validate_ip(ip) ||
le16_to_cpu(ip->hw_id) != ii)
goto next_ip;
DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
/* We have a hw_id match; register the hw
* block if not yet registered.
*/
if (!ip_hw_id) {
ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
if (!ip_hw_id)
return -ENOMEM;
ip_hw_id->hw_id = ii;
kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
res = kset_register(&ip_hw_id->hw_id_kset);
if (res) {
DRM_ERROR("Couldn't register ip_hw_id kset");
kfree(ip_hw_id);
return res;
}
if (hw_id_names[ii]) {
res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
&ip_hw_id->hw_id_kset.kobj,
hw_id_names[ii]);
if (res) {
DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
hw_id_names[ii],
kobject_name(&ip_die_entry->ip_kset.kobj));
}
}
}
/* Now register its instance.
*/
ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
base_addr,
ip->num_base_address),
GFP_KERNEL);
if (!ip_hw_instance) {
DRM_ERROR("no memory for ip_hw_instance");
return -ENOMEM;
}
ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
ip_hw_instance->num_instance = ip->instance_number;
ip_hw_instance->major = ip->major;
ip_hw_instance->minor = ip->minor;
ip_hw_instance->revision = ip->revision;
ip_hw_instance->harvest =
amdgpu_discovery_get_harvest_info(
adev, ip_hw_instance->hw_id,
ip_hw_instance->num_instance);
ip_hw_instance->num_base_addresses = ip->num_base_address;
for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
if (reg_base_64)
ip_hw_instance->base_addr[kk] =
lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
else
ip_hw_instance->base_addr[kk] = ip->base_address[kk];
}
kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
res = kobject_add(&ip_hw_instance->kobj, NULL,
"%d", ip_hw_instance->num_instance);
next_ip:
if (reg_base_64)
ip_offset += struct_size(ip, base_address_64,
ip->num_base_address);
else
ip_offset += struct_size(ip, base_address,
ip->num_base_address);
}
}
return 0;
}
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
struct kset *die_kset = &adev->ip_top->die_kset;
u16 num_dies, die_offset, num_ips;
size_t ip_offset;
int ii, res;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
for (ii = 0; ii < num_dies; ii++) {
struct ip_die_entry *ip_die_entry;
die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
/* Add the die to the kset.
*
* dhdr->die_id == ii, which was checked in
* amdgpu_discovery_reg_base_init().
*/
ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
if (!ip_die_entry)
return -ENOMEM;
ip_die_entry->num_ips = num_ips;
kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
ip_die_entry->ip_kset.kobj.kset = die_kset;
ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
res = kset_register(&ip_die_entry->ip_kset);
if (res) {
DRM_ERROR("Couldn't register ip_die_entry kset");
kfree(ip_die_entry);
return res;
}
amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
}
return 0;
}
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
struct kset *die_kset;
int res, ii;
if (!adev->mman.discovery_bin)
return -EINVAL;
adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
if (!adev->ip_top)
return -ENOMEM;
adev->ip_top->adev = adev;
res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
&adev->dev->kobj, "ip_discovery");
if (res) {
DRM_ERROR("Couldn't init and add ip_discovery/");
goto Err;
}
die_kset = &adev->ip_top->die_kset;
kobject_set_name(&die_kset->kobj, "%s", "die");
die_kset->kobj.parent = &adev->ip_top->kobj;
die_kset->kobj.ktype = &die_kobj_ktype;
res = kset_register(&adev->ip_top->die_kset);
if (res) {
DRM_ERROR("Couldn't register die_kset");
goto Err;
}
for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
ip_hw_instance_attrs[ii] = NULL;
res = amdgpu_discovery_sysfs_recurse(adev);
return res;
Err:
kobject_put(&adev->ip_top->kobj);
return res;
}
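/*
* Illustrative userspace sketch (not part of the driver): reading one of the
* attributes that amdgpu_discovery_sysfs_init() exposes. The tree lives under
* the device's sysfs directory as ip_discovery/die/<die>/<hw_id>/<instance>/,
* with per-IP symlinks named after hw_id_names (e.g. "GC"). The specific PCI
* address and the "GC" link used in the path below are assumptions made for
* this example.
*/
#include <stdio.h>

static int example_read_gc_major(char *buf, int len)
{
	const char *path =
		"/sys/bus/pci/devices/0000:03:00.0/ip_discovery/die/0/GC/0/major";
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}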
/* -------------------------------------------------- */
#define list_to_kobj(el) container_of(el, struct kobject, entry)
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
struct list_head *el, *tmp;
struct kset *hw_id_kset;
hw_id_kset = &ip_hw_id->hw_id_kset;
spin_lock(&hw_id_kset->list_lock);
list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
list_del_init(el);
spin_unlock(&hw_id_kset->list_lock);
/* kobject is embedded in ip_hw_instance */
kobject_put(list_to_kobj(el));
spin_lock(&hw_id_kset->list_lock);
}
spin_unlock(&hw_id_kset->list_lock);
kobject_put(&ip_hw_id->hw_id_kset.kobj);
}
static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
struct list_head *el, *tmp;
struct kset *ip_kset;
ip_kset = &ip_die_entry->ip_kset;
spin_lock(&ip_kset->list_lock);
list_for_each_prev_safe(el, tmp, &ip_kset->list) {
list_del_init(el);
spin_unlock(&ip_kset->list_lock);
amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
spin_lock(&ip_kset->list_lock);
}
spin_unlock(&ip_kset->list_lock);
kobject_put(&ip_die_entry->ip_kset.kobj);
}
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
struct list_head *el, *tmp;
struct kset *die_kset;
die_kset = &adev->ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
list_del_init(el);
spin_unlock(&die_kset->list_lock);
amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
spin_lock(&die_kset->list_lock);
}
spin_unlock(&die_kset->list_lock);
kobject_put(&adev->ip_top->die_kset.kobj);
kobject_put(&adev->ip_top->kobj);
}
/* ================================================== */
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
struct ip_v4 *ip;
uint16_t die_offset;
uint16_t ip_offset;
uint16_t num_dies;
uint16_t num_ips;
uint8_t num_base_address;
int hw_ip;
int i, j, k;
int r;
r = amdgpu_discovery_init(adev);
if (r) {
DRM_ERROR("amdgpu_discovery_init failed\n");
return r;
}
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
bhdr = (struct binary_header *)adev->mman.discovery_bin;
ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
if (le16_to_cpu(dhdr->die_id) != i) {
DRM_ERROR("invalid die id %d, expected %d\n",
le16_to_cpu(dhdr->die_id), i);
return -EINVAL;
}
DRM_DEBUG("number of hardware IPs on die%d: %d\n",
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
if (amdgpu_discovery_validate_ip(ip))
goto next_ip;
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
hw_id_names[le16_to_cpu(ip->hw_id)],
le16_to_cpu(ip->hw_id),
ip->instance_number,
ip->major, ip->minor,
ip->revision);
if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
/* Bit [5:0]: original revision value
* Bit [7:6]: en/decode capability:
* 0b00 : VCN functions normally
* 0b10 : encode is disabled
* 0b01 : decode is disabled
*/
adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
ip->revision & 0xc0;
ip->revision &= ~0xc0;
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
(1U << ip->instance_number);
adev->jpeg.inst_mask |=
(1U << ip->instance_number);
} else {
dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
adev->vcn.num_vcn_inst + 1,
AMDGPU_MAX_VCN_INSTANCES);
}
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
if (adev->sdma.num_instances <
AMDGPU_MAX_SDMA_INSTANCES) {
adev->sdma.num_instances++;
adev->sdma.sdma_mask |=
(1U << ip->instance_number);
} else {
dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
adev->sdma.num_instances + 1,
AMDGPU_MAX_SDMA_INSTANCES);
}
}
if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
adev->gmc.num_umc++;
adev->umc.node_inst_num++;
}
if (le16_to_cpu(ip->hw_id) == GC_HWID)
adev->gfx.xcc_mask |=
(1U << ip->instance_number);
for (k = 0; k < num_base_address; k++) {
/*
* convert the endianness of base addresses in place,
* so that we don't need to convert them when accessing adev->reg_offset.
*/
if (ihdr->base_addr_64_bit)
/* Truncate the 64-bit base address from IP discovery
* and only store the lower 32-bit IP base in reg_offset[].
* Bits above 32 follow an ASIC-specific format, so just
* discard them here and handle them within the specific ASIC code.
* This way reg_offset[] and related helpers can stay unchanged.
* The base address is in dwords, so clear the highest 2 bits
* before storing.
*/
ip->base_address[k] =
lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
else
ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
}
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
hw_id_map[hw_ip] != 0) {
DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->instance_number] =
ip->base_address;
/* Instance support is somewhat inconsistent.
* SDMA is a good example. Sienna cichlid has 4 total
* SDMA instances, each enumerated separately (HWIDs
* 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
* but they are enumerated as multiple instances of the
* same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
* example. On most chips there are multiple instances
* with the same HWID.
*/
adev->ip_versions[hw_ip][ip->instance_number] =
IP_VERSION(ip->major, ip->minor, ip->revision);
}
}
next_ip:
if (ihdr->base_addr_64_bit)
ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
else
ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
return 0;
}
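/* Apply harvesting: on GPUs without a usable harvest table the per-IP harvest
* bits are read instead. If every VCN instance is harvested, the VCN and JPEG
* IPs are masked out, and harvested UMC instances are subtracted from
* gmc.num_umc.
*/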
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
/*
* The harvest table does not apply to Navi1x and legacy GPUs,
* so read the harvest bit from each IP's data structure to set
* the harvest configuration.
*/
if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) &&
adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) {
if ((adev->pdev->device == 0x731E &&
(adev->pdev->revision == 0xC6 ||
adev->pdev->revision == 0xC7)) ||
(adev->pdev->device == 0x7340 &&
adev->pdev->revision == 0xC9) ||
(adev->pdev->device == 0x7360 &&
adev->pdev->revision == 0xC7))
amdgpu_discovery_read_harvest_bit_per_ip(adev,
&vcn_harvest_count);
} else {
amdgpu_discovery_read_from_harvest_table(adev,
&vcn_harvest_count,
&umc_harvest_count);
}
amdgpu_discovery_harvest_config_quirk(adev);
if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
}
if (umc_harvest_count < adev->gmc.num_umc) {
adev->gmc.num_umc -= umc_harvest_count;
}
}
union gc_info {
struct gc_info_v1_0 v1;
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
struct gc_info_v2_0 v2;
struct gc_info_v2_1 v2_1;
};
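/* Fill adev->gfx.config and adev->gfx.cu_info from the GC info table.
* v1.x tables report WGPs per SA (two CUs each), while v2.x tables report
* CUs per SH directly; later minor versions add cache and SDP interface
* details.
*/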
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union gc_info *gc_info;
u16 offset;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[GC].offset);
if (!offset)
return 0;
gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
switch (le16_to_cpu(gc_info->v1.header.version_major)) {
case 1:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
if (gc_info->v1.header.version_minor >= 1) {
adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
}
if (gc_info->v1.header.version_minor >= 2) {
adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
}
break;
case 2:
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
if (gc_info->v2.header.version_minor == 1) {
adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
}
break;
default:
dev_err(adev->dev,
"Unhandled GC info table %d.%d\n",
le16_to_cpu(gc_info->v1.header.version_major),
le16_to_cpu(gc_info->v1.header.version_minor));
return -EINVAL;
}
return 0;
}
union mall_info {
struct mall_info_v1_0 v1;
struct mall_info_v2_0 v2;
};
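/* Compute the total MALL (last level cache) size. v1 sums the per-UMC size,
* doubling it for UMCs flagged in m_s_present and halving it for those in
* m_half_use; v2 simply multiplies mall_size_per_umc by the UMC count.
*/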
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
u16 offset;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
if (!offset)
return 0;
mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
switch (le16_to_cpu(mall_info->v1.header.version_major)) {
case 1:
mall_size = 0;
mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
half_use = le32_to_cpu(mall_info->v1.m_half_use);
for (u = 0; u < adev->gmc.num_umc; u++) {
if (m_s_present & (1 << u))
mall_size += mall_size_per_umc * 2;
else if (half_use & (1 << u))
mall_size += mall_size_per_umc / 2;
else
mall_size += mall_size_per_umc;
}
adev->gmc.mall_size = mall_size;
adev->gmc.m_half_use = half_use;
break;
case 2:
mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
break;
default:
dev_err(adev->dev,
"Unhandled MALL info table %d.%d\n",
le16_to_cpu(mall_info->v1.header.version_major),
le16_to_cpu(mall_info->v1.header.version_minor));
return -EINVAL;
}
return 0;
}
union vcn_info {
struct vcn_info_v1_0 v1;
};
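/* Read the per-instance codec disable fuses from the VCN info table into
* adev->vcn.vcn_codec_disable_mask, bounded by the discovered instance count.
*/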
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
union vcn_info *vcn_info;
u16 offset;
int v;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
* which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
* but that may change in the future with new GPUs so keep this
* check for defensive purposes.
*/
if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
dev_err(adev->dev, "invalid vcn instances\n");
return -EINVAL;
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
if (!offset)
return 0;
vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
case 1:
/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
* so this won't overflow.
*/
for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
adev->vcn.vcn_codec_disable_mask[v] =
le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
}
break;
default:
dev_err(adev->dev,
"Unhandled VCN info table %d.%d\n",
le16_to_cpu(vcn_info->v1.header.version_major),
le16_to_cpu(vcn_info->v1.header.version_minor));
return -EINVAL;
}
return 0;
}
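/* The helpers below select the IP block implementations (common, GMC, IH,
* PSP, SMU, display, GFX, SDMA, multimedia, MES) to register with the device,
* keyed on the IP versions discovered above.
*/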
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
/* what IP to use for this? */
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 7):
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add common ip block(GC_HWIP:0x%x)\n",
adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
/* use GC or MMHUB IP version */
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 7):
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[OSSSYS_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 1):
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 3, 0):
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
break;
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
case IP_VERSION(4, 4, 2):
amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 3):
case IP_VERSION(5, 2, 0):
case IP_VERSION(5, 2, 1):
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
break;
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
break;
case IP_VERSION(6, 1, 0):
amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
adev->ip_versions[OSSSYS_HWIP][0]);
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(9, 0, 0):
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
break;
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
break;
case IP_VERSION(11, 0, 8):
amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
break;
case IP_VERSION(11, 0, 3):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
case IP_VERSION(14, 0, 0):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
adev->ip_versions[MP0_HWIP][0]);
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
case IP_VERSION(11, 0, 2):
if (adev->asic_type == CHIP_ARCTURUS)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
adev->ip_versions[MP1_HWIP][0]);
return -EINVAL;
}
return 0;
}
#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
amdgpu_device_set_sriov_virtual_display(adev);
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
if (adev->enable_virtual_display) {
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
return 0;
}
if (!amdgpu_device_has_dc_support(adev))
return 0;
#if defined(CONFIG_DRM_AMD_DC)
if (adev->ip_versions[DCE_HWIP][0]) {
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
case IP_VERSION(2, 0, 2):
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 0, 3):
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 3):
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
case IP_VERSION(3, 2, 0):
case IP_VERSION(3, 2, 1):
if (amdgpu_sriov_vf(adev))
amdgpu_discovery_set_sriov_display(adev);
else
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
adev->ip_versions[DCE_HWIP][0]);
return -EINVAL;
}
} else if (adev->ip_versions[DCI_HWIP][0]) {
switch (adev->ip_versions[DCI_HWIP][0]) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
case IP_VERSION(12, 1, 0):
if (amdgpu_sriov_vf(adev))
amdgpu_discovery_set_sriov_display(adev);
else
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
adev->ip_versions[DCI_HWIP][0]);
return -EINVAL;
}
}
#endif
return 0;
}
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
break;
case IP_VERSION(9, 4, 3):
if (!amdgpu_exp_hw_support)
return -EINVAL;
amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 7):
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 1):
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 2):
case IP_VERSION(4, 4, 0):
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
break;
case IP_VERSION(4, 4, 2):
amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
break;
case IP_VERSION(5, 2, 0):
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 4):
case IP_VERSION(5, 2, 5):
case IP_VERSION(5, 2, 6):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 1):
case IP_VERSION(5, 2, 7):
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
break;
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
case IP_VERSION(6, 1, 0):
amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
adev->ip_versions[SDMA0_HWIP][0]);
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
if (adev->ip_versions[VCE_HWIP][0]) {
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 2, 0):
/* UVD is not supported on vega20 SR-IOV */
if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
adev->ip_versions[UVD_HWIP][0]);
return -EINVAL;
}
switch (adev->ip_versions[VCE_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 1, 0):
/* VCE is not supported on vega20 SR-IOV */
if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
adev->ip_versions[VCE_HWIP][0]);
return -EINVAL;
}
} else {
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 0, 2):
case IP_VERSION(2, 2, 0):
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
case IP_VERSION(2, 0, 3):
break;
case IP_VERSION(2, 5, 0):
amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
break;
case IP_VERSION(2, 6, 0):
amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 0, 2):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
break;
case IP_VERSION(3, 0, 33):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
break;
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 2):
case IP_VERSION(4, 0, 4):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
break;
case IP_VERSION(4, 0, 3):
amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
break;
default:
dev_err(adev->dev,
"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
adev->ip_versions[UVD_HWIP][0]);
return -EINVAL;
}
}
return 0;
}
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
case IP_VERSION(10, 3, 6):
if (amdgpu_mes) {
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
adev->enable_mes = true;
if (amdgpu_mes_kiq)
adev->enable_mes_kiq = true;
}
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
break;
default:
break;
}
return 0;
}
static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 3):
aqua_vanjaram_init_soc_config(adev);
break;
default:
break;
}
}
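/* Top-level entry point: legacy ASICs without a discovery table get their IP
* versions and instance counts hardcoded here; everything else is parsed from
* the discovery binary, harvested, and described in sysfs before the
* individual IP blocks are registered.
*/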
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
switch (adev->asic_type) {
case CHIP_VEGA10:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
adev->gmc.num_umc = 2;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
} else {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
}
break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 8;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 4;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r)
return -EINVAL;
amdgpu_discovery_harvest_ip(adev);
amdgpu_discovery_get_gfx_info(adev);
amdgpu_discovery_get_mall_info(adev);
amdgpu_discovery_get_vcn_info(adev);
break;
}
amdgpu_discovery_init_soc_config(adev);
amdgpu_discovery_sysfs_init(adev);
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
case IP_VERSION(9, 4, 3):
adev->family = AMDGPU_FAMILY_AI;
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
adev->family = AMDGPU_FAMILY_RV;
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
adev->family = AMDGPU_FAMILY_NV;
break;
case IP_VERSION(10, 3, 1):
adev->family = AMDGPU_FAMILY_VGH;
adev->apu_flags |= AMD_APU_IS_VANGOGH;
break;
case IP_VERSION(10, 3, 3):
adev->family = AMDGPU_FAMILY_YC;
break;
case IP_VERSION(10, 3, 6):
adev->family = AMDGPU_FAMILY_GC_10_3_6;
break;
case IP_VERSION(10, 3, 7):
adev->family = AMDGPU_FAMILY_GC_10_3_7;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
adev->family = AMDGPU_FAMILY_GC_11_0_0;
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->family = AMDGPU_FAMILY_GC_11_0_1;
break;
default:
return -EINVAL;
}
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 1, 4):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 7):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->flags |= AMD_IS_APU;
break;
default:
break;
}
if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
adev->gmc.xgmi.supported = true;
/* set NBIO version */
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 2, 0):
adev->nbio.funcs = &nbio_v6_1_funcs;
adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
break;
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
case IP_VERSION(2, 5, 0):
adev->nbio.funcs = &nbio_v7_0_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
break;
case IP_VERSION(7, 4, 0):
case IP_VERSION(7, 4, 1):
case IP_VERSION(7, 4, 4):
adev->nbio.funcs = &nbio_v7_4_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
break;
case IP_VERSION(7, 9, 0):
adev->nbio.funcs = &nbio_v7_9_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
break;
case IP_VERSION(7, 2, 0):
case IP_VERSION(7, 2, 1):
case IP_VERSION(7, 3, 0):
case IP_VERSION(7, 5, 0):
case IP_VERSION(7, 5, 1):
adev->nbio.funcs = &nbio_v7_2_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
break;
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 3, 0):
case IP_VERSION(2, 3, 1):
case IP_VERSION(2, 3, 2):
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
case IP_VERSION(3, 3, 3):
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
break;
case IP_VERSION(4, 3, 0):
case IP_VERSION(4, 3, 1):
if (amdgpu_sriov_vf(adev))
adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
else
adev->nbio.funcs = &nbio_v4_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
break;
case IP_VERSION(7, 7, 0):
case IP_VERSION(7, 7, 1):
adev->nbio.funcs = &nbio_v7_7_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
break;
default:
break;
}
switch (adev->ip_versions[HDP_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 1):
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
case IP_VERSION(4, 4, 2):
adev->hdp.funcs = &hdp_v4_0_funcs;
break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 3):
case IP_VERSION(5, 0, 4):
case IP_VERSION(5, 2, 0):
adev->hdp.funcs = &hdp_v5_0_funcs;
break;
case IP_VERSION(5, 2, 1):
adev->hdp.funcs = &hdp_v5_2_funcs;
break;
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 1, 0):
adev->hdp.funcs = &hdp_v6_0_funcs;
break;
default:
break;
}
switch (adev->ip_versions[DF_HWIP][0]) {
case IP_VERSION(3, 6, 0):
case IP_VERSION(3, 6, 1):
case IP_VERSION(3, 6, 2):
adev->df.funcs = &df_v3_6_funcs;
break;
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 5, 0):
case IP_VERSION(3, 5, 1):
case IP_VERSION(3, 5, 2):
adev->df.funcs = &df_v1_7_funcs;
break;
case IP_VERSION(4, 3, 0):
adev->df.funcs = &df_v4_3_funcs;
break;
default:
break;
}
switch (adev->ip_versions[SMUIO_HWIP][0]) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(9, 0, 1):
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
case IP_VERSION(10, 0, 2):
adev->smuio.funcs = &smuio_v9_0_funcs;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 8):
adev->smuio.funcs = &smuio_v11_0_funcs;
break;
case IP_VERSION(11, 0, 6):
case IP_VERSION(11, 0, 10):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 9):
case IP_VERSION(13, 0, 10):
adev->smuio.funcs = &smuio_v11_0_6_funcs;
break;
case IP_VERSION(13, 0, 2):
adev->smuio.funcs = &smuio_v13_0_funcs;
break;
case IP_VERSION(13, 0, 3):
adev->smuio.funcs = &smuio_v13_0_3_funcs;
if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
adev->flags |= AMD_IS_APU;
}
break;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 8):
case IP_VERSION(14, 0, 0):
adev->smuio.funcs = &smuio_v13_0_6_funcs;
break;
default:
break;
}
switch (adev->ip_versions[LSDMA_HWIP][0]) {
case IP_VERSION(6, 0, 0):
case IP_VERSION(6, 0, 1):
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
adev->lsdma.funcs = &lsdma_v6_0_funcs;
break;
default:
break;
}
r = amdgpu_discovery_set_common_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_gmc_ip_blocks(adev);
if (r)
return r;
/* For SR-IOV, PSP needs to be initialized before IH */
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_discovery_set_psp_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_ih_ip_blocks(adev);
if (r)
return r;
} else {
r = amdgpu_discovery_set_ih_ip_blocks(adev);
if (r)
return r;
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
r = amdgpu_discovery_set_psp_ip_blocks(adev);
if (r)
return r;
}
}
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
r = amdgpu_discovery_set_smu_ip_blocks(adev);
if (r)
return r;
}
r = amdgpu_discovery_set_display_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_gc_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_sdma_ip_blocks(adev);
if (r)
return r;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev)) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
r = amdgpu_discovery_set_smu_ip_blocks(adev);
if (r)
return r;
}
r = amdgpu_discovery_set_mm_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_mes_ip_blocks(adev);
if (r)
return r;
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
#define MC_SEQ_MISC0__MT__DDR2 0x20000000
#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
gmc_v6_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
/* Block CPU access */
WREG32(mmBIF_FB_EN, 0);
/* blackout the MC */
blackout = REG_SET_FIELD(blackout,
MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
/* wait for the MC to settle */
udelay(100);
}
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
/* unblackout the MC */
tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
/* allow CPU access */
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
}
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
int err;
bool is_58_fw = false;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_TAHITI:
chip_name = "tahiti";
break;
case CHIP_PITCAIRN:
chip_name = "pitcairn";
break;
case CHIP_VERDE:
chip_name = "verde";
break;
case CHIP_OLAND:
chip_name = "oland";
break;
case CHIP_HAINAN:
chip_name = "hainan";
break;
default:
BUG();
}
/* this memory configuration requires special firmware */
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
is_58_fw = true;
if (is_58_fw)
snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s\"\n",
fw_name);
amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
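/* Upload the MC firmware: if the sequencer is not already running, reset it,
* program the io-debug index/data register pairs, write the ucode words,
* restart the engine and wait for memory training to complete on both
* channels.
*/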
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
const __le32 *new_fw_data = NULL;
u32 running;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
const struct mc_firmware_header_v1_0 *hdr;
if (!adev->gmc.fw)
return -EINVAL;
hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
amdgpu_ucode_print_mc_hdr(&hdr->header);
adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
new_io_mc_regs = (const __le32 *)
(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
new_fw_data = (const __le32 *)
(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
if (running == 0) {
/* reset the engine and set to writable */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
}
/* load the MC ucode */
for (i = 0; i < ucode_size; i++)
WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
/* put the engine back into the active state */
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
/* wait for training to complete */
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
break;
udelay(1);
}
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
break;
udelay(1);
}
}
return 0;
}
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
{
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
int i, j;
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x6) {
WREG32((0xb05 + j), 0x00000000);
WREG32((0xb06 + j), 0x00000000);
WREG32((0xb07 + j), 0x00000000);
WREG32((0xb08 + j), 0x00000000);
WREG32((0xb09 + j), 0x00000000);
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
if (gmc_v6_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
if (adev->mode_info.num_crtc) {
u32 tmp;
/* Lockout access through VGA aperture*/
tmp = RREG32(mmVGA_HDP_CONTROL);
tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
WREG32(mmVGA_HDP_CONTROL, tmp);
/* disable VGA render */
tmp = RREG32(mmVGA_RENDER_CONTROL);
tmp &= ~VGA_VSTATUS_CNTL;
WREG32(mmVGA_RENDER_CONTROL, tmp);
}
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->gmc.vram_start >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v6_0_wait_for_idle((void *)adev))
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
u32 tmp;
int chansize, numchan;
int r;
tmp = RREG32(mmMC_ARB_RAMCFG);
if (tmp & (1 << 11))
chansize = 16;
else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
chansize = 64;
else
chansize = 32;
tmp = RREG32(mmMC_SHARED_CHMAP);
switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
case 4:
numchan = 3;
break;
case 5:
numchan = 6;
break;
case 6:
numchan = 10;
break;
case 7:
numchan = 12;
break;
case 8:
numchan = 16;
break;
}
adev->gmc.vram_width = numchan * chansize;
/* size in MB on si */
adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_device_resize_fb_bar(adev);
if (r)
return r;
}
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
adev->gmc.visible_vram_size = adev->gmc.aper_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
switch (adev->asic_type) {
case CHIP_HAINAN: /* no MM engines */
default:
adev->gmc.gart_size = 256ULL << 20;
break;
case CHIP_VERDE: /* UVD, VCE do not support GPUVM */
case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */
case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
case CHIP_OLAND: /* UVD, VCE do not support GPUVM */
adev->gmc.gart_size = 1024ULL << 20;
break;
}
} else {
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
return 0;
}
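/* TLB flush on SI is a single register write: setting bit N of
* VM_INVALIDATE_REQUEST invalidates the mappings of VM context N.
*/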
static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
uint32_t reg;
/* write new base address */
if (vmid < 8)
reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
else
reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
/* bits 0-15 are the VM contexts0-15 */
amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
return pd_addr;
}
static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/**
* gmc_v6_0_set_prt() - set PRT VM fault
*
* @adev: amdgpu_device pointer
* @enable: enable/disable VM fault handling for PRT
*/
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
u32 tmp;
if (enable && !adev->gmc.prt_warning) {
dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
adev->gmc.prt_warning = true;
}
tmp = RREG32(mmVM_PRT_CNTL);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
L2_CACHE_STORE_INVALID_ENTRIES,
enable);
tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
L1_TLB_STORE_INVALID_ENTRIES,
enable);
WREG32(mmVM_PRT_CNTL, tmp);
if (enable) {
uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
uint32_t high = adev->vm_manager.max_pfn -
(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
} else {
WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
}
}
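/* Enable the GART: program the L1 TLB and L2 cache, point context0 at the
* GART range and the shared page table, reserve contexts 1-15 for per-process
* VMs, and flush the TLB before reporting the mapped size.
*/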
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
uint64_t table_addr;
u32 field;
int i;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
/* Setup TLB control */
WREG32(mmMC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
(0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
/* Setup L2 cache */
WREG32(mmVM_L2_CNTL,
VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
(7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
(1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
WREG32(mmVM_L2_CNTL2,
VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
field = adev->vm_manager.fragment_size;
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
(field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
(field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
/* setup context0 */
WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT0_CNTL2, 0);
WREG32(mmVM_CONTEXT0_CNTL,
VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
(0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
WREG32(0x575, 0);
WREG32(0x576, 0);
WREG32(0x577, 0);
/* empty context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
/* Assign the PT base to something valid for now; the page tables
 * actually used by the VMs are determined by the applications and
 * set up and assigned on the fly by the VM code.
 */
for (i = 1; i < AMDGPU_NUM_VMID; i++) {
if (i < 8)
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
table_addr >> 12);
else
WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
table_addr >> 12);
}
/* enable context1-15 */
WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page_addr >> 12));
WREG32(mmVM_CONTEXT1_CNTL2, 4);
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
((adev->vm_manager.block_size - 9)
<< VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v6_0_set_fault_enable_default(adev, false);
else
gmc_v6_0_set_fault_enable_default(adev, true);
gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned int)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
return 0;
}
static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
int r;
if (adev->gart.bo) {
dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
return 0;
}
r = amdgpu_gart_init(adev);
if (r)
return r;
adev->gart.table_size = adev->gart.num_gpu_pages * 8;
adev->gart.gart_pte_flags = 0;
return amdgpu_gart_table_vram_alloc(adev);
}
static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
/*unsigned i;
for (i = 1; i < 16; ++i) {
uint32_t reg;
if (i < 8)
reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i ;
else
reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
adev->vm_manager.saved_table_addr[i] = RREG32(reg);
}*/
/* Disable all tables */
WREG32(mmVM_CONTEXT0_CNTL, 0);
WREG32(mmVM_CONTEXT1_CNTL, 0);
/* Setup TLB control */
WREG32(mmMC_VM_MX_L1_TLB_CNTL,
MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
(0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
/* Setup L2 cache */
WREG32(mmVM_L2_CNTL,
VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
(7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
(1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
WREG32(mmVM_L2_CNTL2, 0);
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
(0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
u32 status, u32 addr, u32 mc_client)
{
u32 mc_id;
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
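/* mc_client is a four-character ASCII block name packed into one 32-bit word */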
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
"write" : "read", block, mc_client, mc_id);
}
/*
static const u32 mc_cg_registers[] = {
MC_HUB_MISC_HUB_CG,
MC_HUB_MISC_SIP_CG,
MC_HUB_MISC_VM_CG,
MC_XPB_CLK_GAT,
ATC_MISC_CG,
MC_CITF_MISC_WR_CG,
MC_CITF_MISC_RD_CG,
MC_CITF_MISC_VM_CG,
VM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
ATC_MISC_CG__MEM_LS_ENABLE_MASK,
MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
VM_L2_CG__MEM_LS_ENABLE_MASK,
};
static const u32 mc_cg_en[] = {
MC_HUB_MISC_HUB_CG__ENABLE_MASK,
MC_HUB_MISC_SIP_CG__ENABLE_MASK,
MC_HUB_MISC_VM_CG__ENABLE_MASK,
MC_XPB_CLK_GAT__ENABLE_MASK,
ATC_MISC_CG__ENABLE_MASK,
MC_CITF_MISC_WR_CG__ENABLE_MASK,
MC_CITF_MISC_RD_CG__ENABLE_MASK,
MC_CITF_MISC_VM_CG__ENABLE_MASK,
VM_L2_CG__ENABLE_MASK,
};
static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
bool enable)
{
int i;
u32 orig, data;
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
if (data != orig)
WREG32(mc_cg_registers[i], data);
}
}
static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
bool enable)
{
int i;
u32 orig, data;
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
if (data != orig)
WREG32(mc_cg_registers[i], data);
}
}
static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
} else {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
}
if (orig != data)
WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
if (orig != data)
WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
orig = data = RREG32(mmHDP_MEM_POWER_LS);
if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
if (orig != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
*/
static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
switch (mc_seq_vram_type) {
case MC_SEQ_MISC0__MT__GDDR1:
return AMDGPU_VRAM_TYPE_GDDR1;
case MC_SEQ_MISC0__MT__DDR2:
return AMDGPU_VRAM_TYPE_DDR2;
case MC_SEQ_MISC0__MT__GDDR3:
return AMDGPU_VRAM_TYPE_GDDR3;
case MC_SEQ_MISC0__MT__GDDR4:
return AMDGPU_VRAM_TYPE_GDDR4;
case MC_SEQ_MISC0__MT__GDDR5:
return AMDGPU_VRAM_TYPE_GDDR5;
case MC_SEQ_MISC0__MT__DDR3:
return AMDGPU_VRAM_TYPE_DDR3;
default:
return AMDGPU_VRAM_TYPE_UNKNOWN;
}
}
static int gmc_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v6_0_set_gmc_funcs(adev);
gmc_v6_0_set_irq_funcs(adev);
return 0;
}
static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
else
return 0;
}
static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
unsigned int size;
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
u32 viewport = RREG32(mmVIEWPORT_SIZE);
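/* active display: the VBIOS framebuffer covers viewport width * height at 4 bytes per pixel */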
size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
4);
}
return size;
}
static int gmc_v6_0_sw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
if (adev->flags & AMD_IS_APU) {
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
} else {
u32 tmp = RREG32(mmMC_SEQ_MISC0);
tmp &= MC_SEQ_MISC0__MT__MASK;
adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
}
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
if (r)
return r;
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
if (r)
return r;
amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
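/* SI parts expose a 40-bit MC address space */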
adev->gmc.mc_mask = 0xffffffffffULL;
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
dev_warn(adev->dev, "No suitable DMA available.\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
r = gmc_v6_0_init_microcode(adev);
if (r) {
dev_err(adev->dev, "Failed to load mc firmware!\n");
return r;
}
r = gmc_v6_0_mc_init(adev);
if (r)
return r;
amdgpu_gmc_get_vbios_allocations(adev);
r = amdgpu_bo_init(adev);
if (r)
return r;
r = gmc_v6_0_gart_init(adev);
if (r)
return r;
/*
* number of VMs
* VMID 0 is reserved for System
* amdgpu graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.first_kfd_vmid = 8;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU) {
u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
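/* MC_VM_FB_OFFSET counts 4MB units, hence the shift by 22 below */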
tmp <<= 22;
adev->vm_manager.vram_base_offset = tmp;
} else {
adev->vm_manager.vram_base_offset = 0;
}
return 0;
}
static int gmc_v6_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
static int gmc_v6_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v6_0_mc_program(adev);
if (!(adev->flags & AMD_IS_APU)) {
r = gmc_v6_0_mc_load_microcode(adev);
if (r) {
dev_err(adev->dev, "Failed to load MC firmware!\n");
return r;
}
}
r = gmc_v6_0_gart_enable(adev);
if (r)
return r;
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
else
return r;
}
static int gmc_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v6_0_gart_disable(adev);
return 0;
}
static int gmc_v6_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v6_0_hw_fini(adev);
return 0;
}
static int gmc_v6_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = gmc_v6_0_hw_init(adev);
if (r)
return r;
amdgpu_vmid_reset_all(adev);
return 0;
}
static bool gmc_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
return false;
return true;
}
static int gmc_v6_0_wait_for_idle(void *handle)
{
unsigned int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
if (gmc_v6_0_is_idle(handle))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int gmc_v6_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
if (!(adev->flags & AMD_IS_APU))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
gmc_v6_0_mc_stop(adev);
if (gmc_v6_0_wait_for_idle(adev))
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
gmc_v6_0_mc_resume(adev);
udelay(50);
}
return 0;
}
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned int type,
enum amdgpu_interrupt_state state)
{
u32 tmp;
u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp &= ~bits;
WREG32(mmVM_CONTEXT0_CNTL, tmp);
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp &= ~bits;
WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
case AMDGPU_IRQ_STATE_ENABLE:
tmp = RREG32(mmVM_CONTEXT0_CNTL);
tmp |= bits;
WREG32(mmVM_CONTEXT0_CNTL, tmp);
tmp = RREG32(mmVM_CONTEXT1_CNTL);
tmp |= bits;
WREG32(mmVM_CONTEXT1_CNTL, tmp);
break;
default:
break;
}
return 0;
}
static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u32 addr, status;
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
if (!addr && !status)
return 0;
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);
if (printk_ratelimit()) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data[0]);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
}
return 0;
}
static int gmc_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int gmc_v6_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
.name = "gmc_v6_0",
.early_init = gmc_v6_0_early_init,
.late_init = gmc_v6_0_late_init,
.sw_init = gmc_v6_0_sw_init,
.sw_fini = gmc_v6_0_sw_fini,
.hw_init = gmc_v6_0_hw_init,
.hw_fini = gmc_v6_0_hw_fini,
.suspend = gmc_v6_0_suspend,
.resume = gmc_v6_0_resume,
.is_idle = gmc_v6_0_is_idle,
.wait_for_idle = gmc_v6_0_wait_for_idle,
.soft_reset = gmc_v6_0_soft_reset,
.set_clockgating_state = gmc_v6_0_set_clockgating_state,
.set_powergating_state = gmc_v6_0_set_powergating_state,
};
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte = gmc_v6_0_get_vm_pte,
.get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};
static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
.set = gmc_v6_0_vm_fault_interrupt_state,
.process = gmc_v6_0_process_interrupt,
};
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}
const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &gmc_v6_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König <[email protected]>
*/
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"
/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
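/* packed as (major << 24) | (minor << 16) | (rev << 8); used below as the
 * minimum UVD firmware version that supports the ENC rings
 */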
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
* uvd_v6_0_enc_support - get encode support status
*
* @adev: amdgpu_device pointer
*
* Returns the current hardware encode support status
*/
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
return ((adev->asic_type >= CHIP_POLARIS10) &&
(adev->asic_type <= CHIP_VEGAM) &&
(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
/**
* uvd_v6_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32(mmUVD_RBC_RB_RPTR);
}
/**
* uvd_v6_0_enc_ring_get_rptr - get enc read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware enc read pointer
*/
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_RPTR);
else
return RREG32(mmUVD_RB_RPTR2);
}
/**
* uvd_v6_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32(mmUVD_RBC_RB_WPTR);
}
/**
* uvd_v6_0_enc_ring_get_wptr - get enc write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware enc write pointer
*/
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_WPTR);
else
return RREG32(mmUVD_RB_WPTR2);
}
/**
* uvd_v6_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
* uvd_v6_0_enc_ring_set_wptr - set enc write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the enc write pointer to the hardware
*/
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.inst->ring_enc[0])
WREG32(mmUVD_RB_WPTR,
lower_32_bits(ring->wptr));
else
WREG32(mmUVD_RB_WPTR2,
lower_32_bits(ring->wptr));
}
/**
* uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
*
* @ring: the engine to test on
*
*/
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr;
unsigned i;
int r;
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
rptr = amdgpu_ring_get_rptr(ring);
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
/**
* uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
*
* @ring: ring we should submit the msg to
* @handle: session handle to use
* @bo: amdgpu object for which we query the offset
* @fence: optional fence to return
*
* Open up a stream for HW test
*/
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
ib->ptr[ib->length_dw++] = 0x0000001c;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008;
ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
amdgpu_job_free(job);
return r;
}
/**
* uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
*
* @ring: ring we should submit the msg to
* @handle: session handle to use
* @bo: amdgpu object for which we query the offset
* @fence: optional fence to return
*
* Close up a stream for HW test or if userspace failed to do so
*/
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint32_t handle,
struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
uint64_t addr;
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
ib->ptr[ib->length_dw++] = handle;
ib->ptr[ib->length_dw++] = 0x00010000;
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000014;
ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
ib->ptr[ib->length_dw++] = 0x0000001c;
ib->ptr[ib->length_dw++] = 0x00000001;
ib->ptr[ib->length_dw++] = 0x00000000;
ib->ptr[ib->length_dw++] = 0x00000008;
ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err;
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
return 0;
err:
amdgpu_job_free(job);
return r;
}
/**
* uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
*
* @ring: the engine to test on
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
*/
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
long r;
r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
if (r)
goto error;
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0)
r = -ETIMEDOUT;
else if (r > 0)
r = 0;
error:
dma_fence_put(fence);
return r;
}
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
if (!(adev->flags & AMD_IS_APU) &&
(RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
return -ENOENT;
uvd_v6_0_set_ring_funcs(adev);
if (uvd_v6_0_enc_support(adev)) {
adev->uvd.num_enc_rings = 2;
uvd_v6_0_set_enc_ring_funcs(adev);
}
uvd_v6_0_set_irq_funcs(adev);
return 0;
}
static int uvd_v6_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
if (r)
return r;
/* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
if (r)
return r;
}
}
r = amdgpu_uvd_sw_init(adev);
if (r)
return r;
if (!uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.inst->ring_enc[i].funcs = NULL;
adev->uvd.inst->irq.num_types = 1;
adev->uvd.num_enc_rings = 0;
DRM_INFO("UVD ENC is disabled\n");
}
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
}
r = amdgpu_uvd_entity_init(adev);
return r;
}
static int uvd_v6_0_sw_fini(void *handle)
{
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
}
return amdgpu_uvd_sw_fini(adev);
}
/**
* uvd_v6_0_hw_init - start and test UVD block
*
* @handle: handle used to pass amdgpu_device pointer
*
* Initialize the hardware, boot up the VCPU and do some testing
*/
static int uvd_v6_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp;
int i, r;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
r = amdgpu_ring_alloc(ring, 10);
if (r) {
DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
goto done;
}
tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
/* Clear timeout status bits */
amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
amdgpu_ring_write(ring, 0x8);
amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
}
}
done:
if (!r) {
if (uvd_v6_0_enc_support(adev))
DRM_INFO("UVD and UVD ENC initialized successfully.\n");
else
DRM_INFO("UVD initialized successfully.\n");
}
return r;
}
/**
* uvd_v6_0_hw_fini - stop the hardware block
*
* @handle: handle used to pass amdgpu_device pointer
*
* Stop the UVD block, mark ring as not ready any more
*/
static int uvd_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->uvd.idle_work);
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
return 0;
}
static int uvd_v6_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
* - enable powergating
* - enable clockgating
* - disable dpm
*
* TODO: to align with the VCN implementation, move the
* jobs for clockgating/powergating/dpm setting to
* ->set_powergating_state().
*/
cancel_delayed_work_sync(&adev->uvd.idle_work);
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
}
r = uvd_v6_0_hw_fini(adev);
if (r)
return r;
return amdgpu_uvd_suspend(adev);
}
static int uvd_v6_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_uvd_resume(adev);
if (r)
return r;
return uvd_v6_0_hw_init(adev);
}
/**
* uvd_v6_0_mc_resume - memory controller programming
*
* @adev: amdgpu_device pointer
*
* Let the UVD memory controller know its offsets
*/
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
uint64_t offset;
uint32_t size;
/* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
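/* the VCPU cache window offsets are programmed in 8-byte units, hence the >> 3 */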
WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
size = AMDGPU_UVD_STACK_SIZE +
(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
bool enable)
{
u32 data, data1;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
if (enable) {
data |= UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK;
data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK |
UVD_SUVD_CGC_GATE__SRE_H264_MASK |
UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
UVD_SUVD_CGC_GATE__SIT_H264_MASK |
UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
UVD_SUVD_CGC_GATE__SCM_H264_MASK |
UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
UVD_SUVD_CGC_GATE__SDB_H264_MASK |
UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
} else {
data &= ~(UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__LMI_UMC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK);
data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK |
UVD_SUVD_CGC_GATE__SRE_H264_MASK |
UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
UVD_SUVD_CGC_GATE__SIT_H264_MASK |
UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
UVD_SUVD_CGC_GATE__SCM_H264_MASK |
UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
UVD_SUVD_CGC_GATE__SDB_H264_MASK |
UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
}
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
/**
* uvd_v6_0_start - start UVD block
*
* @adev: amdgpu_device pointer
*
* Setup and start the UVD block
*/
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl;
int i, j, r;
/* disable DPG */
WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
/* disable byte swapping */
lmi_swap_cntl = 0;
mp_swap_cntl = 0;
uvd_v6_0_mc_resume(adev);
/* disable interrupt */
WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);
/* stall UMC and register bus before resetting VCPU */
WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
mdelay(1);
/* put LMI, VCPU, RBC etc... into reset */
WREG32(mmUVD_SOFT_RESET,
UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
mdelay(5);
/* take UVD block out of reset */
WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
mdelay(5);
/* initialize UVD memory controller */
WREG32(mmUVD_LMI_CTRL,
(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__REQ_MODE_MASK |
UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
lmi_swap_cntl = 0xa;
mp_swap_cntl = 0;
#endif
WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);
WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
WREG32(mmUVD_MPC_SET_ALU, 0);
WREG32(mmUVD_MPC_SET_MUX, 0x88);
/* take all subblocks out of reset, except VCPU */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
/* enable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
/* enable UMC */
WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);
/* boot up the VCPU */
WREG32(mmUVD_SOFT_RESET, 0);
mdelay(10);
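/* poll UVD_STATUS for the VCPU-ready bit; if the VCPU does not come up,
 * toggle its soft reset and retry, up to 10 attempts
 */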
for (i = 0; i < 10; ++i) {
uint32_t status;
for (j = 0; j < 100; ++j) {
status = RREG32(mmUVD_STATUS);
if (status & 2)
break;
mdelay(10);
}
r = 0;
if (status & 2)
break;
DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
mdelay(10);
WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
mdelay(10);
r = -1;
}
if (r) {
DRM_ERROR("UVD not responding, giving up!!!\n");
return r;
}
/* enable master interrupt */
WREG32_P(mmUVD_MASTINT_EN,
(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
/* clear the bit 4 of UVD_STATUS */
WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32(mmUVD_RBC_RB_CNTL, tmp);
/* set the write pointer delay */
WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
WREG32(mmUVD_RBC_RB_RPTR, 0);
ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
if (uvd_v6_0_enc_support(adev)) {
ring = &adev->uvd.inst->ring_enc[0];
WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
ring = &adev->uvd.inst->ring_enc[1];
WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
}
return 0;
}
/**
* uvd_v6_0_stop - stop UVD block
*
* @adev: amdgpu_device pointer
*
* stop the UVD block
*/
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
/* force RBC into idle state */
WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);
/* Stall UMC and register bus before resetting VCPU */
WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
mdelay(1);
/* put VCPU into reset */
WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
mdelay(5);
/* disable VCPU clock */
WREG32(mmUVD_VCPU_CNTL, 0x0);
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
WREG32(mmUVD_STATUS, 0);
}
/**
* uvd_v6_0_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Write a fence and a trap command to the ring.
*/
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 2);
}
/**
* uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Write an enc fence and a trap command to the ring.
*/
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
amdgpu_ring_write(ring, addr);
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}
/**
* uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
*
* @ring: amdgpu_ring pointer
*/
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
/* The firmware doesn't seem to like touching registers at this point. */
}
/**
* uvd_v6_0_ring_test_ring - register write test
*
* @ring: amdgpu_ring pointer
*
* Test if we can successfully write to the context register
*/
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
unsigned i;
int r;
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmUVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
/**
* uvd_v6_0_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job to retrieve vmid from
* @ib: indirect buffer to execute
* @flags: unused
*
* Write ring commands to execute the indirect buffer
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
amdgpu_ring_write(ring, ib->length_dw);
}
/**
* uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job to retrieve vmid from
* @ib: indirect buffer to execute
* @flags: unused
*
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
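/* hand the register offset (converted to a byte address) and the value to the
 * VCPU through the GPCOM data registers, then trigger the write via the
 * command register
 */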
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0x8);
}
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
amdgpu_ring_write(ring, 1 << vmid); /* mask */
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0xC);
}
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0xE);
}
static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, seq);
}
static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}
static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, pd_addr >> 12);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
amdgpu_ring_write(ring, vmid);
}
static bool uvd_v6_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}
static int uvd_v6_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
if (uvd_v6_0_is_idle(handle))
return 0;
}
return -ETIMEDOUT;
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
(RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) {
adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->uvd.inst->srbm_soft_reset = 0;
return false;
}
}
static int uvd_v6_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
uvd_v6_0_stop(adev);
return 0;
}
static int uvd_v6_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int uvd_v6_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.inst->srbm_soft_reset)
return 0;
mdelay(5);
return uvd_v6_0_start(adev);
}
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
// TODO
return 0;
}
static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
bool int_handled = true;
DRM_DEBUG("IH: UVD TRAP\n");
switch (entry->src_id) {
case 124:
amdgpu_fence_process(&adev->uvd.inst->ring);
break;
case 119:
if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
else
int_handled = false;
break;
case 120:
if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
else
int_handled = false;
break;
}
if (!int_handled)
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
return 0;
}
static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t data1, data3;
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
data3 = RREG32(mmUVD_CGC_GATE);
data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK |
UVD_SUVD_CGC_GATE__SRE_H264_MASK |
UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
UVD_SUVD_CGC_GATE__SIT_H264_MASK |
UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
UVD_SUVD_CGC_GATE__SCM_H264_MASK |
UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
UVD_SUVD_CGC_GATE__SDB_H264_MASK |
UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
if (enable) {
data3 |= (UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__LMI_UMC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__JPEG_MASK |
UVD_CGC_GATE__SCPU_MASK |
UVD_CGC_GATE__JPEG2_MASK);
/* only when PG is enabled can we gate the clock to the VCPU */
if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
data3 |= UVD_CGC_GATE__VCPU_MASK;
data3 &= ~UVD_CGC_GATE__REGS_MASK;
} else {
data3 = 0;
}
WREG32(mmUVD_SUVD_CGC_GATE, data1);
WREG32(mmUVD_CGC_GATE, data3);
}
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data2;
data = RREG32(mmUVD_CGC_CTRL);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG2_MODE_MASK);
data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
WREG32(mmUVD_CGC_CTRL, data);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}
#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
uint32_t data, data1, cgc_flags, suvd_flags;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
cgc_flags = UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK |
UVD_CGC_GATE__JPEG_MASK |
UVD_CGC_GATE__JPEG2_MASK;
suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
data |= cgc_flags;
data1 |= suvd_flags;
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
bool enable)
{
u32 orig, data;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data |= 0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
if (orig != data)
WREG32(mmUVD_CGC_CTRL, data);
} else {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data &= ~0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
orig = data = RREG32(mmUVD_CGC_CTRL);
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
if (orig != data)
WREG32(mmUVD_CGC_CTRL, data);
}
}
static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(handle))
return -EBUSY;
uvd_v6_0_enable_clock_gating(adev, true);
/* enable HW gates because UVD is idle */
/* uvd_v6_0_set_hw_clock_gating(adev); */
} else {
/* disable HW gating and enable Sw gating */
uvd_v6_0_enable_clock_gating(adev, false);
}
uvd_v6_0_set_sw_clock_gating(adev);
return 0;
}
static int uvd_v6_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the UVD block.
* That's done in the dpm code via the SMC. This
* just re-inits the block as necessary. The actual
* gating still happens in the dpm code. We should
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev);
} else {
ret = uvd_v6_0_start(adev);
if (ret)
goto out;
}
out:
return ret;
}
static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
mutex_lock(&adev->pm.mutex);
if (adev->flags & AMD_IS_APU)
data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
else
data = RREG32_SMC(ixCURRENT_PG_STATUS);
if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
goto out;
}
/* AMD_CG_SUPPORT_UVD_MGCG */
data = RREG32(mmUVD_CGC_CTRL);
if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
*flags |= AMD_CG_SUPPORT_UVD_MGCG;
out:
mutex_unlock(&adev->pm.mutex);
}
static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
.sw_init = uvd_v6_0_sw_init,
.sw_fini = uvd_v6_0_sw_fini,
.hw_init = uvd_v6_0_hw_init,
.hw_fini = uvd_v6_0_hw_fini,
.suspend = uvd_v6_0_suspend,
.resume = uvd_v6_0_resume,
.is_idle = uvd_v6_0_is_idle,
.wait_for_idle = uvd_v6_0_wait_for_idle,
.check_soft_reset = uvd_v6_0_check_soft_reset,
.pre_soft_reset = uvd_v6_0_pre_soft_reset,
.soft_reset = uvd_v6_0_soft_reset,
.post_soft_reset = uvd_v6_0_post_soft_reset,
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.parse_cs = amdgpu_uvd_ring_parse_cs,
.emit_frame_size =
6 + /* hdp invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = uvd_v6_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.emit_wreg = uvd_v6_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr,
.set_wptr = uvd_v6_0_ring_set_wptr,
.emit_frame_size =
6 + /* hdp invalidate */
10 + /* uvd_v6_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
.emit_ib = uvd_v6_0_ring_emit_ib,
.emit_fence = uvd_v6_0_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = uvd_v6_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
.emit_wreg = uvd_v6_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD_ENC,
.align_mask = 0x3f,
.nop = HEVC_ENC_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = uvd_v6_0_enc_ring_get_rptr,
.get_wptr = uvd_v6_0_enc_ring_get_wptr,
.set_wptr = uvd_v6_0_enc_ring_set_wptr,
.emit_frame_size =
4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
1, /* uvd_v6_0_enc_ring_insert_end */
.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
.emit_ib = uvd_v6_0_enc_ring_emit_ib,
.emit_fence = uvd_v6_0_enc_ring_emit_fence,
.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
.test_ring = uvd_v6_0_enc_ring_test_ring,
.test_ib = uvd_v6_0_enc_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = uvd_v6_0_enc_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
};
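/*
 * uvd_v6_0_set_ring_funcs - select the decode ring callbacks; Polaris and
 * newer ASICs use the VM variant, older ASICs use physical addressing.
 */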
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
if (adev->asic_type >= CHIP_POLARIS10) {
adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
DRM_INFO("UVD is enabled in VM mode\n");
} else {
adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
DRM_INFO("UVD is enabled in physical mode\n");
}
}
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
DRM_INFO("UVD ENC is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
.set = uvd_v6_0_set_interrupt_state,
.process = uvd_v6_0_process_interrupt,
};
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
if (uvd_v6_0_enc_support(adev))
adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
else
adev->uvd.inst->irq.num_types = 1;
adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}
const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &uvd_v6_0_ip_funcs,
};
const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 6,
.minor = 2,
.rev = 0,
.funcs = &uvd_v6_0_ip_funcs,
};
const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_UVD,
.major = 6,
.minor = 3,
.rev = 0,
.funcs = &uvd_v6_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "sdma0/sdma0_4_2_offset.h"
#include "sdma0/sdma0_4_2_sh_mask.h"
#include "sdma1/sdma1_4_2_offset.h"
#include "sdma1/sdma1_4_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "sdma0/sdma0_4_1_default.h"
#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"
#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
#include "amdgpu_ras.h"
#include "sdma_v4_4.h"
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
#define WREG32_SDMA(instance, offset, value) \
WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
};
static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RD_BURST_CNTL, 0x0000000f, 0x00000003),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RD_BURST_CNTL, 0x0000000f, 0x00000003),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};
static const struct soc15_reg_golden golden_settings_sdma_rv2[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00003001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001)
};
static const struct soc15_reg_golden golden_settings_sdma_arct[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};
static const struct soc15_reg_golden golden_settings_sdma_aldebaran[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};
static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
{ "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
0, 0,
},
{ "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
0, 0,
},
{ "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
0, 0,
},
{ "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
0, 0,
},
{ "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
0, 0,
},
{ "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
0, 0,
},
{ "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
0, 0,
},
{ "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
0, 0,
},
{ "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
0, 0,
},
};
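/**
 * sdma_v4_0_get_reg_offset - compute the absolute register offset for an instance
 *
 * @adev: amdgpu_device pointer
 * @instance: SDMA instance number (0-7)
 * @offset: register offset relative to the instance base
 *
 * Translate an instance-relative SDMA register offset into the absolute
 * offset for that instance. Returns 0 for an unknown instance.
 */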
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
u32 instance, u32 offset)
{
switch (instance) {
case 0:
return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
case 1:
return (adev->reg_offset[SDMA1_HWIP][0][0] + offset);
case 2:
return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
case 3:
return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
case 4:
return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
case 5:
return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
case 6:
return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
case 7:
return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
default:
break;
}
return 0;
}
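/**
 * sdma_v4_0_seq_to_irq_id - map an SDMA instance number to its IH client id
 *
 * @seq_num: SDMA instance number (0-7)
 *
 * Returns the SOC15 IH client id for the instance, or -EINVAL if the
 * instance number is out of range.
 */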
static unsigned sdma_v4_0_seq_to_irq_id(int seq_num)
{
switch (seq_num) {
case 0:
return SOC15_IH_CLIENTID_SDMA0;
case 1:
return SOC15_IH_CLIENTID_SDMA1;
case 2:
return SOC15_IH_CLIENTID_SDMA2;
case 3:
return SOC15_IH_CLIENTID_SDMA3;
case 4:
return SOC15_IH_CLIENTID_SDMA4;
case 5:
return SOC15_IH_CLIENTID_SDMA5;
case 6:
return SOC15_IH_CLIENTID_SDMA6;
case 7:
return SOC15_IH_CLIENTID_SDMA7;
default:
break;
}
return -EINVAL;
}
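/**
 * sdma_v4_0_irq_id_to_seq - map an IH client id back to an SDMA instance number
 *
 * @client_id: SOC15 IH client id
 *
 * Returns the SDMA instance number (0-7), or -EINVAL for an unknown client.
 */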
static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
{
switch (client_id) {
case SOC15_IH_CLIENTID_SDMA0:
return 0;
case SOC15_IH_CLIENTID_SDMA1:
return 1;
case SOC15_IH_CLIENTID_SDMA2:
return 2;
case SOC15_IH_CLIENTID_SDMA3:
return 3;
case SOC15_IH_CLIENTID_SDMA4:
return 4;
case SOC15_IH_CLIENTID_SDMA5:
return 5;
case SOC15_IH_CLIENTID_SDMA6:
return 6;
case SOC15_IH_CLIENTID_SDMA7:
return 7;
default:
break;
}
return -EINVAL;
}
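/**
 * sdma_v4_0_init_golden_registers - program the golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Apply the golden register sequences that match the SDMA IP version
 * (Vega10/12/20, Arcturus, Aldebaran and the APU variants).
 */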
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 0, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
soc15_program_register_sequence(adev,
golden_settings_sdma_vg10,
ARRAY_SIZE(golden_settings_sdma_vg10));
break;
case IP_VERSION(4, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
soc15_program_register_sequence(adev,
golden_settings_sdma_vg12,
ARRAY_SIZE(golden_settings_sdma_vg12));
break;
case IP_VERSION(4, 2, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma0_4_2_init,
ARRAY_SIZE(golden_settings_sdma0_4_2_init));
soc15_program_register_sequence(adev,
golden_settings_sdma0_4_2,
ARRAY_SIZE(golden_settings_sdma0_4_2));
soc15_program_register_sequence(adev,
golden_settings_sdma1_4_2,
ARRAY_SIZE(golden_settings_sdma1_4_2));
break;
case IP_VERSION(4, 2, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_arct,
ARRAY_SIZE(golden_settings_sdma_arct));
break;
case IP_VERSION(4, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_aldebaran,
ARRAY_SIZE(golden_settings_sdma_aldebaran));
break;
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_sdma_rv2,
ARRAY_SIZE(golden_settings_sdma_rv2));
else
soc15_program_register_sequence(adev,
golden_settings_sdma_rv1,
ARRAY_SIZE(golden_settings_sdma_rv1));
break;
case IP_VERSION(4, 1, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_4_3,
ARRAY_SIZE(golden_settings_sdma_4_3));
break;
default:
break;
}
}
static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
{
int i;
/*
* The only chips with SDMAv4 and ULV are VG10 and VG20.
* Server SKUs take a different hysteresis setting from other SKUs.
*/
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 0, 0):
if (adev->pdev->device == 0x6860)
break;
return;
case IP_VERSION(4, 2, 0):
if (adev->pdev->device == 0x66a1)
break;
return;
default:
return;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
uint32_t temp;
temp = RREG32_SDMA(i, mmSDMA0_ULV_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_ULV_CNTL, HYSTERESIS, 0x0);
WREG32_SDMA(i, mmSDMA0_ULV_CNTL, temp);
}
}
/**
* sdma_v4_0_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
// emulation only, won't work on a real chip
// a real vega10 chip needs to use PSP to load firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
int ret, i;
for (i = 0; i < adev->sdma.num_instances; i++) {
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
ret = amdgpu_sdma_init_microcode(adev, 0, true);
break;
} else {
ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
}
return ret;
}
/**
* sdma_v4_0_ring_get_rptr - get the current read pointer
*
* @ring: amdgpu ring pointer
*
* Get the current rptr from the hardware (VEGA10+).
*/
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
u64 *rptr;
/* XXX check if swapping is necessary on BE */
rptr = ((u64 *)ring->rptr_cpu_addr);
DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
return ((*rptr) >> 2);
}
/**
* sdma_v4_0_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware (VEGA10+).
*/
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u64 wptr;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
wptr = wptr << 32;
wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
ring->me, wptr);
}
return wptr >> 2;
}
/**
* sdma_v4_0_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware (VEGA10+).
*/
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
DRM_DEBUG("Setting write pointer\n");
if (ring->use_doorbell) {
u64 *wb = (u64 *)ring->wptr_cpu_addr;
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr << 2) == 0x%08x "
"upper_32_bits(ring->wptr << 2) == 0x%08x\n",
ring->wptr_offs,
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, (ring->wptr << 2));
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
DRM_DEBUG("Not using doorbell -- "
"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
ring->me,
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
lower_32_bits(ring->wptr << 2));
WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
upper_32_bits(ring->wptr << 2));
}
}
/**
* sdma_v4_0_page_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware (VEGA10+).
*/
static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u64 wptr;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
} else {
wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
wptr = wptr << 32;
wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
}
return wptr >> 2;
}
/**
* sdma_v4_0_page_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware (VEGA10+).
*/
static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
u64 *wb = (u64 *)ring->wptr_cpu_addr;
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, (ring->wptr << 2));
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
} else {
uint64_t wptr = ring->wptr << 2;
WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
lower_32_bits(wptr));
WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
upper_32_bits(wptr));
}
}
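/**
 * sdma_v4_0_ring_insert_nop - insert NOP packets in the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad with a single burst NOP packet spanning all @count dwords when the
 * firmware supports burst NOPs, otherwise emit individual NOP packets.
 */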
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
* sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
* @job: job to retrieve vmid from
* @ib: IB object to schedule
* @flags: unused
*
* Schedule an IB in the DMA ring (VEGA10).
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on an 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
}
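/*
 * sdma_v4_0_wait_reg_mem - emit an SDMA POLL_REGMEM packet
 *
 * Poll either a register pair or a memory location until
 * (value & mask) == ref, optionally marking the poll as an HDP flush.
 */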
static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
int mem_space, int hdp,
uint32_t addr0, uint32_t addr1,
uint32_t ref, uint32_t mask,
uint32_t inv)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
if (mem_space) {
/* memory */
amdgpu_ring_write(ring, addr0);
amdgpu_ring_write(ring, addr1);
} else {
/* registers */
amdgpu_ring_write(ring, addr0 << 2);
amdgpu_ring_write(ring, addr1 << 2);
}
amdgpu_ring_write(ring, ref); /* reference */
amdgpu_ring_write(ring, mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}
/**
* sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
sdma_v4_0_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
ref_and_mask, ref_and_mask, 10);
}
/**
* sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed (VEGA10).
*/
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
/* zero in first two bits */
BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
/* optionally write high bits as well */
if (write64bit) {
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
/* zero in first two bits */
BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
* sdma_v4_0_gfx_enable - enable the gfx async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable SDMA RB/IB
*
* Control the gfx async dma ring buffers (VEGA10).
*/
static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
{
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
}
}
/**
* sdma_v4_0_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the compute async dma queues (VEGA10).
*/
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{
/* XXX todo */
}
/**
* sdma_v4_0_page_stop - stop the page async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the page async dma ring buffers (VEGA10).
*/
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
}
}
/**
* sdma_v4_0_ctx_switch_enable - enable or disable the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
*
* Halt or unhalt the async dma engines context switch (VEGA10).
*/
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl, phase_quantum = 0;
int i;
if (amdgpu_sdma_phase_quantum) {
unsigned value = amdgpu_sdma_phase_quantum;
unsigned unit = 0;
while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
value = (value + 1) >> 1;
unit++;
}
if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
WARN_ONCE(1,
"clamping sdma_phase_quantum to %uK clock cycles\n",
value << unit);
}
phase_quantum =
value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
}
WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
/*
* Enable SDMA utilization. It's only supported on
* Arcturus for the moment and firmware version 14
* and above.
*/
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&
adev->sdma.instance[i].fw_version >= 14)
WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
/* Extend page fault timeout to avoid interrupt storm */
WREG32_SDMA(i, mmSDMA0_UTCL1_TIMEOUT, 0x00800080);
}
}
/**
* sdma_v4_0_enable - halt or unhalt the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
*
* Halt or unhalt the async dma engines (VEGA10).
*/
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
if (!enable) {
sdma_v4_0_gfx_enable(adev, enable);
sdma_v4_0_rlc_stop(adev);
if (adev->sdma.has_page_queue)
sdma_v4_0_page_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
}
}
/*
* sdma_v4_0_rb_cntl - get parameters for rb_cntl
*/
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
/* Set ring buffer size in dwords */
uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
return rb_cntl;
}
/**
* sdma_v4_0_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
* @i: instance to resume
*
* Set up the gfx DMA ring buffers and enable them (VEGA10).
*/
static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
/* set the wb address whether it's enabled or not */
WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_ENABLE, 1);
WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programming wptr to a smaller value, minor_ptr_update must be set first */
WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
ring->use_doorbell);
doorbell_offset = REG_SET_FIELD(doorbell_offset,
SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
sdma_v4_0_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr is programmed */
WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
/* setup the wptr shadow polling */
wptr_gpu_addr = ring->wptr_gpu_addr;
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
lower_32_bits(wptr_gpu_addr));
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
}
/**
* sdma_v4_0_page_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
* @i: instance to resume
*
* Set up the page DMA ring buffers and enable them (VEGA10).
*/
static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
{
struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 doorbell;
u32 doorbell_offset;
u64 wptr_gpu_addr;
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
/* set the wb address whether it's enabled or not */
WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RPTR_WRITEBACK_ENABLE, 1);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programming wptr to a smaller value, minor_ptr_update must be set first */
WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
ring->use_doorbell);
doorbell_offset = REG_SET_FIELD(doorbell_offset,
SDMA0_PAGE_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
/* paging queue doorbell range is setup at sdma_v4_0_gfx_resume */
sdma_v4_0_page_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr is programmed */
WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
/* setup the wptr shadow polling */
wptr_gpu_addr = ring->wptr_gpu_addr;
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
lower_32_bits(wptr_gpu_addr));
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_PAGE_RB_WPTR_POLL_CNTL,
F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
}
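/*
 * sdma_v4_1_update_power_gating - enable or disable the context-empty idle
 * interrupt that drives SDMA power gating on SDMA v4.1 parts.
 */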
static void
sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
/* enable idle interrupt */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
} else {
/* disable idle interrupt */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
}
}
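/*
 * sdma_v4_1_init_power_gating - turn on hardware-based power gating and
 * program the default on/off hold and hysteresis durations.
 */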
static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
{
uint32_t def, data;
/* Enable HW based PG. */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
/* enable interrupt */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
/* Configure hold time to filter invalid power on/off requests. Use default right now */
def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
/* Configure switch time for hysteresis purposes. Use default right now */
data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
if (data != def)
WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
}
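/*
 * sdma_v4_0_init_pg - initialize SDMA power gating, only supported on
 * the SDMA v4.1.x (APU) variants.
 */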
static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
{
if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
return;
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
sdma_v4_1_init_power_gating(adev);
sdma_v4_1_update_power_gating(adev, true);
break;
default:
break;
}
}
/**
* sdma_v4_0_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the compute DMA queues and enable them (VEGA10).
* Returns 0 for success, error for failure.
*/
static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
{
sdma_v4_0_init_pg(adev);
return 0;
}
/**
* sdma_v4_0_load_microcode - load the sDMA ME ucode
*
* @adev: amdgpu_device pointer
*
* Loads the sDMA0/1 ucode.
* Returns 0 for success, -EINVAL if the ucode is not available.
*/
static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
{
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
int i, j;
/* halt the MEs */
sdma_v4_0_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
if (!adev->sdma.instance[i].fw)
return -EINVAL;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
fw_data = (const __le32 *)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
for (j = 0; j < fw_size; j++)
WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
le32_to_cpup(fw_data++));
WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
adev->sdma.instance[i].fw_version);
}
return 0;
}
/**
* sdma_v4_0_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the DMA engines and enable them (VEGA10).
* Returns 0 for success, error for failure.
*/
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
int i, r = 0;
if (amdgpu_sriov_vf(adev)) {
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
} else {
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
r = sdma_v4_0_load_microcode(adev);
if (r)
return r;
}
/* unhalt the MEs */
sdma_v4_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v4_0_ctx_switch_enable(adev, true);
}
/* start the gfx rings and rlc compute queues */
for (i = 0; i < adev->sdma.num_instances; i++) {
uint32_t temp;
WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
sdma_v4_0_gfx_resume(adev, i);
if (adev->sdma.has_page_queue)
sdma_v4_0_page_resume(adev, i);
/* set utc l1 enable flag always to 1 */
temp = RREG32_SDMA(i, mmSDMA0_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
}
}
if (amdgpu_sriov_vf(adev)) {
sdma_v4_0_ctx_switch_enable(adev, true);
sdma_v4_0_enable(adev, true);
} else {
r = sdma_v4_0_rlc_resume(adev);
if (r)
return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->sdma.has_page_queue) {
struct amdgpu_ring *page = &adev->sdma.instance[i].page;
r = amdgpu_ring_test_helper(page);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == page)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return r;
}
/**
* sdma_v4_0_ring_test_ring - simple async dma engine test
*
* @ring: amdgpu_ring structure holding ring information
*
* Test the DMA engine by using it to write a
* value to memory (VEGA10).
* Returns 0 for success, error for failure.
*/
static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
unsigned i;
unsigned index;
int r;
u32 tmp;
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
if (r)
goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
error_free_wb:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v4_0_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (VEGA10).
* Returns 0 on success, error on failure.
*/
static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
long r;
u32 tmp = 0;
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
ib.ptr[4] = 0xDEADBEEF;
ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr to copy from
* @count: number of page entries to update
*
* Update PTEs by copying them from the GART using sDMA (VEGA10).
*/
static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = bytes - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
* sdma_v4_0_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
*
* Update PTEs by writing them manually using sDMA (VEGA10).
*/
static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr)
{
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw - 1;
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
}
}
/**
* sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using sDMA (VEGA10).
*/
static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
/**
* sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*/
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
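	/* pad the IB length up to a multiple of 8 dwords with NOP packets */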
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
else
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
/**
* sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
 * Make sure all previous operations are completed (VEGA10).
*/
static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
sdma_v4_0_wait_reg_mem(ring, 1, 0,
addr & 0xfffffffc,
upper_32_bits(addr) & 0xffffffff,
seq, 0xffffffff, 4);
}
/**
* sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
* @vmid: vmid number to use
 * @pd_addr: page directory base address
*
* Update the page table base and flush the VM TLB
* using sDMA (VEGA10).
*/
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}
static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, val);
}
static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
{
uint fw_version = adev->sdma.instance[0].fw_version;
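	/* minimum SDMA firmware version that supports the paging queue, per IP version */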
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 0, 0):
return fw_version >= 430;
case IP_VERSION(4, 0, 1):
/*return fw_version >= 31;*/
return false;
case IP_VERSION(4, 2, 0):
return fw_version >= 123;
default:
return false;
}
}
static int sdma_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = sdma_v4_0_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
}
/* TODO: Page queue breaks driver reload under SRIOV */
if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&
amdgpu_sriov_vf((adev)))
adev->sdma.has_page_queue = false;
else if (sdma_v4_0_fw_support_paging_queue(adev))
adev->sdma.has_page_queue = true;
sdma_v4_0_set_ring_funcs(adev);
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
sdma_v4_0_set_irq_funcs(adev);
sdma_v4_0_set_ras_funcs(adev);
return 0;
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry);
static int sdma_v4_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v4_0_setup_ulv(adev);
if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
}
return 0;
}
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
for (i = 0; i < adev->sdma.num_instances; i++) {
r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
}
/* SDMA SRAM ECC event */
for (i = 0; i < adev->sdma.num_instances; i++) {
r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
&adev->sdma.ecc_irq);
if (r)
return r;
}
	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
for (i = 0; i < adev->sdma.num_instances; i++) {
r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_VM_HOLE,
&adev->sdma.vm_hole_irq);
if (r)
return r;
r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
&adev->sdma.doorbell_invalid_irq);
if (r)
return r;
r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
&adev->sdma.pool_timeout_irq);
if (r)
return r;
r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
&adev->sdma.srbm_write_irq);
if (r)
return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = true;
DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
/* doorbell size is 2 dwords, get DWORD offset */
ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
/*
* On Arcturus, SDMA instance 5~7 has a different vmhub
* type(AMDGPU_MMHUB1).
*/
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
ring->vm_hub = AMDGPU_MMHUB1(0);
else
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
if (adev->sdma.has_page_queue) {
ring = &adev->sdma.instance[i].page;
ring->ring_obj = NULL;
ring->use_doorbell = true;
			/* paging queue uses the same doorbell index/routing as gfx queue
* with 0x400 (4096 dwords) offset on second doorbell page
*/
if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) {
ring->doorbell_index =
adev->doorbell_index.sdma_engine[i] << 1;
ring->doorbell_index += 0x400;
} else {
/* From vega20, the sdma_doorbell_range in 1st
* doorbell page is reserved for page queue.
*/
ring->doorbell_index =
(adev->doorbell_index.sdma_engine[i] + 1) << 1;
}
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
ring->vm_hub = AMDGPU_MMHUB1(0);
else
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
}
if (amdgpu_sdma_ras_sw_init(adev)) {
dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
return -EINVAL;
}
return r;
}
static int sdma_v4_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
if (adev->sdma.has_page_queue)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
amdgpu_sdma_destroy_inst_ctx(adev, true);
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
static int sdma_v4_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
return sdma_v4_0_start(adev);
}
static int sdma_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
if (amdgpu_sriov_vf(adev)) {
/* disable the scheduler for SDMA */
amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
}
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i);
}
}
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
if (adev->flags & AMD_IS_APU)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
return 0;
}
static int sdma_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SMU saves SDMA state for us */
if (adev->in_s0ix) {
sdma_v4_0_gfx_enable(adev, false);
return 0;
}
return sdma_v4_0_hw_fini(adev);
}
static int sdma_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SMU restores SDMA state for us */
if (adev->in_s0ix) {
sdma_v4_0_enable(adev, true);
sdma_v4_0_gfx_enable(adev, true);
amdgpu_ttm_set_buffer_funcs_status(adev, true);
return 0;
}
return sdma_v4_0_hw_init(adev);
}
static bool sdma_v4_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
}
return true;
}
static int sdma_v4_0_wait_for_idle(void *handle)
{
unsigned i, j;
u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
for (j = 0; j < adev->sdma.num_instances; j++) {
sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG);
if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK))
break;
}
if (j == adev->sdma.num_instances)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int sdma_v4_0_soft_reset(void *handle)
{
/* todo */
return 0;
}
static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t instance;
DRM_DEBUG("IH: SDMA trap\n");
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
}
static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry)
{
int instance;
/* When “Full RAS” is enabled, the per-IP interrupt sources should
* be disabled and the driver should only look for the aggregated
* interrupt via sync flood
*/
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
goto out;
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0)
goto out;
amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
out:
return AMDGPU_RAS_SUCCESS;
}
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
int instance;
DRM_ERROR("Illegal instruction in SDMA command stream\n");
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0)
return 0;
switch (entry->ring_id) {
case 0:
drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
break;
}
return 0;
}
static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_edc_config;
sdma_edc_config = RREG32_SDMA(type, mmSDMA0_EDC_CONFIG);
sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32_SDMA(type, mmSDMA0_EDC_CONFIG, sdma_edc_config);
return 0;
}
static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
int instance;
struct amdgpu_task_info task_info;
u64 addr;
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
if (instance < 0 || instance >= adev->sdma.num_instances) {
dev_err(adev->dev, "sdma instance invalid %d\n", instance);
return -EINVAL;
}
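	/* reconstruct the faulting address: src_data[0] holds bits 43:12,
	 * the low nibble of src_data[1] holds bits 47:44
	 */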
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
memset(&task_info, 0, sizeof(struct amdgpu_task_info));
amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
dev_dbg_ratelimited(adev->dev,
"[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
"pasid:%u, for process %s pid %d thread %s pid %d\n",
instance, addr, entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
return 0;
}
static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
sdma_v4_0_print_iv_entry(adev, entry);
return 0;
}
static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
	dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
sdma_v4_0_print_iv_entry(adev, entry);
return 0;
}
static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
dev_dbg_ratelimited(adev->dev,
"Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
sdma_v4_0_print_iv_entry(adev, entry);
return 0;
}
static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
dev_dbg_ratelimited(adev->dev,
"SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n");
sdma_v4_0_print_iv_entry(adev, entry);
return 0;
}
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def;
int i;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (def != data)
WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (def != data)
WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
}
}
}
static void sdma_v4_0_update_medium_grain_light_sleep(
struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def;
int i;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
/* 1-not override: enable sdma mem light sleep */
			def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
				WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
/* 0-override:disable sdma mem light sleep */
			def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (def != data)
				WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
}
}
}
static int sdma_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
sdma_v4_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v4_0_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
return 0;
}
static int sdma_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
sdma_v4_1_update_power_gating(adev,
state == AMD_PG_STATE_GATE);
break;
default:
break;
}
return 0;
}
static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
/* AMD_CG_SUPPORT_SDMA_MGCG */
data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
/* AMD_CG_SUPPORT_SDMA_LS */
data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.name = "sdma_v4_0",
.early_init = sdma_v4_0_early_init,
.late_init = sdma_v4_0_late_init,
.sw_init = sdma_v4_0_sw_init,
.sw_fini = sdma_v4_0_sw_fini,
.hw_init = sdma_v4_0_hw_init,
.hw_fini = sdma_v4_0_hw_fini,
.suspend = sdma_v4_0_suspend,
.resume = sdma_v4_0_resume,
.is_idle = sdma_v4_0_is_idle,
.wait_for_idle = sdma_v4_0_wait_for_idle,
.soft_reset = sdma_v4_0_soft_reset,
.set_clockgating_state = sdma_v4_0_set_clockgating_state,
.set_powergating_state = sdma_v4_0_set_powergating_state,
.get_clockgating_state = sdma_v4_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_ring_get_wptr,
.set_wptr = sdma_v4_0_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v4_0_ring_emit_hdp_flush */
3 + /* hdp invalidate */
6 + /* sdma_v4_0_ring_emit_pipeline_sync */
/* sdma_v4_0_ring_emit_vm_flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
.emit_ib = sdma_v4_0_ring_emit_ib,
.emit_fence = sdma_v4_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
.test_ring = sdma_v4_0_ring_test_ring,
.test_ib = sdma_v4_0_ring_test_ib,
.insert_nop = sdma_v4_0_ring_insert_nop,
.pad_ib = sdma_v4_0_ring_pad_ib,
.emit_wreg = sdma_v4_0_ring_emit_wreg,
.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xff,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.get_rptr = sdma_v4_0_ring_get_rptr,
.get_wptr = sdma_v4_0_page_ring_get_wptr,
.set_wptr = sdma_v4_0_page_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v4_0_ring_emit_hdp_flush */
3 + /* hdp invalidate */
6 + /* sdma_v4_0_ring_emit_pipeline_sync */
/* sdma_v4_0_ring_emit_vm_flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
.emit_ib = sdma_v4_0_ring_emit_ib,
.emit_fence = sdma_v4_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
.test_ring = sdma_v4_0_ring_test_ring,
.test_ib = sdma_v4_0_ring_test_ib,
.insert_nop = sdma_v4_0_ring_insert_nop,
.pad_ib = sdma_v4_0_ring_pad_ib,
.emit_wreg = sdma_v4_0_ring_emit_wreg,
.emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
if (adev->sdma.has_page_queue) {
adev->sdma.instance[i].page.funcs =
&sdma_v4_0_page_ring_funcs;
adev->sdma.instance[i].page.me = i;
}
}
}
static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
.set = sdma_v4_0_set_trap_irq_state,
.process = sdma_v4_0_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
.process = sdma_v4_0_process_illegal_inst_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.set = sdma_v4_0_set_ecc_irq_state,
.process = amdgpu_sdma_process_ecc_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v4_0_vm_hole_irq_funcs = {
.process = sdma_v4_0_process_vm_hole_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v4_0_doorbell_invalid_irq_funcs = {
.process = sdma_v4_0_process_doorbell_invalid_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v4_0_pool_timeout_irq_funcs = {
.process = sdma_v4_0_process_pool_timeout_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs = {
.process = sdma_v4_0_process_srbm_write_irq,
};
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
	/* For Arcturus and Aldebaran, add another 4 IRQ handlers */
switch (adev->sdma.num_instances) {
case 5:
case 8:
adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
break;
default:
break;
}
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs;
adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs;
adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs;
adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs;
}
/**
* sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
* @tmz: if a secure copy should be used
*
* Copy GPU buffers using the DMA engine (VEGA10/12).
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
* sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
* Fill GPU buffers using the DMA engine (VEGA10/12).
*/
static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
ib->ptr[ib->length_dw++] = byte_count - 1;
}
static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
.copy_max_bytes = 0x400000,
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v4_0_emit_copy_buffer,
.fill_max_bytes = 0x400000,
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
};
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
if (adev->sdma.has_page_queue)
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
else
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = sdma_v4_0_vm_copy_pte,
.write_pte = sdma_v4_0_vm_write_pte,
.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
struct drm_gpu_scheduler *sched;
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
if (adev->sdma.has_page_queue)
sched = &adev->sdma.instance[i].page.sched;
else
sched = &adev->sdma.instance[i].ring.sched;
adev->vm_manager.vm_pte_scheds[i] = sched;
}
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
static void sdma_v4_0_get_ras_error_count(uint32_t value,
uint32_t instance,
uint32_t *sec_count)
{
uint32_t i;
uint32_t sec_cnt;
	/* double bit error (multiple bits) detection is not supported */
for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
/* the SDMA_EDC_COUNTER register in each sdma instance
* shares the same sed shift_mask
		 */
sec_cnt = (value &
sdma_v4_0_ras_fields[i].sec_count_mask) >>
sdma_v4_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
DRM_INFO("Detected %s in SDMA%d, SED %d\n",
sdma_v4_0_ras_fields[i].name,
instance, sec_cnt);
*sec_count += sec_cnt;
}
}
}
static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev,
uint32_t instance, void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0;
uint32_t reg_value = 0;
reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
/* double bit error is not supported */
if (reg_value)
sdma_v4_0_get_ras_error_count(reg_value,
instance, &sec_count);
/* err_data->ce_count should be initialized to 0
* before calling into this function */
err_data->ce_count += sec_count;
/* double bit error is not supported
* set ue count to 0 */
err_data->ue_count = 0;
return 0;
}
static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
{
int i = 0;
for (i = 0; i < adev->sdma.num_instances; i++) {
if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
return;
}
}
}
static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
int i;
/* read back edc counter registers to clear the counters */
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
for (i = 0; i < adev->sdma.num_instances; i++)
RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
}
}
const struct amdgpu_ras_block_hw_ops sdma_v4_0_ras_hw_ops = {
.query_ras_error_count = sdma_v4_0_query_ras_error_count,
.reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};
static struct amdgpu_sdma_ras sdma_v4_0_ras = {
.ras_block = {
.hw_ops = &sdma_v4_0_ras_hw_ops,
.ras_cb = sdma_v4_0_process_ras_data_cb,
},
};
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 2):
adev->sdma.ras = &sdma_v4_0_ras;
break;
case IP_VERSION(4, 4, 0):
adev->sdma.ras = &sdma_v4_4_ras;
break;
default:
break;
}
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 4,
.minor = 0,
.rev = 0,
.funcs = &sdma_v4_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c |
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include <asm/div64.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_vblank.h>
/**
* amdgpu_display_hotplug_work_func - work handler for display hotplug event
*
* @work: work struct pointer
*
* This is the hotplug event work handler (all ASICs).
* The work gets scheduled from the IRQ handler if there
* was a hotplug interrupt. It walks through the connector table
* and calls hotplug handler for each connector. After this, it sends
* a DRM hotplug event to alert userspace.
*
* This design approach is required in order to defer hotplug event handling
* from the IRQ handler to a work handler because hotplug handler has to use
* mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
* sleep).
*/
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
hotplug_work.work);
struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
mutex_lock(&mode_config->mutex);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
amdgpu_connector_hotplug(connector);
drm_connector_list_iter_end(&iter);
mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
static void amdgpu_display_flip_callback(struct dma_fence *f,
struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
dma_fence_put(f);
schedule_work(&work->flip_work.work);
}
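/*
 * Install the page flip callback on @f if it has not signaled yet.
 * Returns true if the callback was installed and the flip work will be
 * rescheduled when the fence signals, false if there is no fence or it
 * has already signaled.
 */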
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
struct dma_fence **f)
{
struct dma_fence *fence = *f;
if (fence == NULL)
return false;
*f = NULL;
if (!dma_fence_add_callback(fence, &work->cb,
amdgpu_display_flip_callback))
return true;
dma_fence_put(fence);
return false;
}
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
struct delayed_work *delayed_work =
container_of(__work, struct delayed_work, work);
struct amdgpu_flip_work *work =
container_of(delayed_work, struct amdgpu_flip_work, flip_work);
struct amdgpu_device *adev = work->adev;
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];
struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
unsigned int i;
int vpos, hpos;
for (i = 0; i < work->shared_count; ++i)
if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
return;
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
if (amdgpu_crtc->enabled &&
(amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
&vpos, &hpos, NULL, NULL,
&crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
amdgpu_get_vblank_counter_kms(crtc)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
/* We borrow the event spin lock for protecting flip_status */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
/* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */
amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
drm_dbg_vbl(adev_to_drm(adev),
"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
/*
* Handle unpin events outside the interrupt handler proper.
*/
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
struct amdgpu_flip_work *work =
container_of(__work, struct amdgpu_flip_work, unpin_work);
int r;
/* unpin of the old buffer */
r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
amdgpu_bo_unpin(work->old_abo);
amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
amdgpu_bo_unref(&work->old_abo);
kfree(work->shared);
kfree(work);
}
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags, uint32_t target,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
int i, r;
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);
work->event = event;
work->adev = adev;
work->crtc_id = amdgpu_crtc->crtc_id;
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */
obj = crtc->primary->fb->obj[0];
/* take a reference to the old object */
work->old_abo = gem_to_amdgpu_bo(obj);
amdgpu_bo_ref(work->old_abo);
obj = fb->obj[0];
new_abo = gem_to_amdgpu_bo(obj);
/* pin the new buffer */
r = amdgpu_bo_reserve(new_abo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new abo buffer before flip\n");
goto cleanup;
}
if (!adev->enable_virtual_display) {
r = amdgpu_bo_pin(new_abo,
amdgpu_display_supported_domains(adev, new_abo->flags));
if (unlikely(r != 0)) {
DRM_ERROR("failed to pin new abo buffer before flip\n");
goto unreserve;
}
}
r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
if (unlikely(r != 0)) {
DRM_ERROR("%p bind failed\n", new_abo);
goto unpin;
}
r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
}
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
if (!adev->enable_virtual_display)
work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(crtc);
	/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
r = -EBUSY;
goto pflip_cleanup;
}
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
amdgpu_crtc->pflip_works = work;
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
amdgpu_display_flip_work_func(&work->flip_work.work);
return 0;
pflip_cleanup:
if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
DRM_ERROR("failed to reserve new abo in error path\n");
goto cleanup;
}
unpin:
if (!adev->enable_virtual_display)
amdgpu_bo_unpin(new_abo);
unreserve:
amdgpu_bo_unreserve(new_abo);
cleanup:
amdgpu_bo_unref(&work->old_abo);
for (i = 0; i < work->shared_count; ++i)
dma_fence_put(work->shared[i]);
kfree(work->shared);
kfree(work);
return r;
}
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev;
struct amdgpu_device *adev;
struct drm_crtc *crtc;
bool active = false;
int ret;
if (!set || !set->crtc)
return -EINVAL;
dev = set->crtc->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
goto out;
ret = drm_crtc_helper_set_config(set, ctx);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->enabled)
active = true;
pm_runtime_mark_last_busy(dev->dev);
adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
* take the current one
*/
if (active && !adev->have_disp_power_ref) {
adev->have_disp_power_ref = true;
return ret;
}
/* if we have no active crtcs, then drop the power ref
* we got before
*/
if (!active && adev->have_disp_power_ref) {
pm_runtime_put_autosuspend(dev->dev);
adev->have_disp_power_ref = false;
}
out:
/* drop the power reference we got coming in here */
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const char *encoder_names[41] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
"INTERNAL_TMDS2",
"INTERNAL_DAC1",
"INTERNAL_DAC2",
"INTERNAL_SDVOA",
"INTERNAL_SDVOB",
"SI170B",
"CH7303",
"CH7301",
"INTERNAL_DVO1",
"EXTERNAL_SDVOA",
"EXTERNAL_SDVOB",
"TITFP513",
"INTERNAL_LVTM1",
"VT1623",
"HDMI_SI1930",
"HDMI_INTERNAL",
"INTERNAL_KLDSCP_TMDS1",
"INTERNAL_KLDSCP_DVO1",
"INTERNAL_KLDSCP_DAC1",
"INTERNAL_KLDSCP_DAC2",
"SI178",
"MVPU_FPGA",
"INTERNAL_DDI",
"VT1625",
"HDMI_SI1932",
"DP_AN9801",
"DP_DP501",
"INTERNAL_UNIPHY",
"INTERNAL_KLDSCP_LVTMA",
"INTERNAL_UNIPHY1",
"INTERNAL_UNIPHY2",
"NUTMEG",
"TRAVIS",
"INTERNAL_VCE",
"INTERNAL_UNIPHY3",
"HDMI_ANX9805",
"INTERNAL_AMCLK",
"VIRTUAL",
};
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
"HPD3",
"HPD4",
"HPD5",
"HPD6",
};
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
struct drm_connector_list_iter iter;
uint32_t devices;
int i = 0;
drm_connector_list_iter_begin(dev, &iter);
DRM_INFO("AMDGPU Display Connectors\n");
drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
if (amdgpu_connector->ddc_bus) {
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
amdgpu_connector->ddc_bus->rec.mask_clk_reg,
amdgpu_connector->ddc_bus->rec.mask_data_reg,
amdgpu_connector->ddc_bus->rec.a_clk_reg,
amdgpu_connector->ddc_bus->rec.a_data_reg,
amdgpu_connector->ddc_bus->rec.en_clk_reg,
amdgpu_connector->ddc_bus->rec.en_data_reg,
amdgpu_connector->ddc_bus->rec.y_clk_reg,
amdgpu_connector->ddc_bus->rec.y_data_reg);
if (amdgpu_connector->router.ddc_valid)
DRM_INFO(" DDC Router 0x%x/0x%x\n",
amdgpu_connector->router.ddc_mux_control_pin,
amdgpu_connector->router.ddc_mux_state);
if (amdgpu_connector->router.cd_valid)
DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
amdgpu_connector->router.cd_mux_control_pin,
amdgpu_connector->router.cd_mux_state);
} else {
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to [email protected]\n");
}
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
devices = amdgpu_encoder->devices & amdgpu_connector->devices;
if (devices) {
if (devices & ATOM_DEVICE_CRT1_SUPPORT)
DRM_INFO(" CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_CRT2_SUPPORT)
DRM_INFO(" CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_LCD1_SUPPORT)
DRM_INFO(" LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP1_SUPPORT)
DRM_INFO(" DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP2_SUPPORT)
DRM_INFO(" DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP3_SUPPORT)
DRM_INFO(" DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP4_SUPPORT)
DRM_INFO(" DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP5_SUPPORT)
DRM_INFO(" DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_DFP6_SUPPORT)
DRM_INFO(" DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_TV1_SUPPORT)
DRM_INFO(" TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
if (devices & ATOM_DEVICE_CV_SUPPORT)
DRM_INFO(" CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
}
}
i++;
}
drm_connector_list_iter_end(&iter);
}
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
bool use_aux)
{
u8 out = 0x0;
u8 buf[8];
int ret;
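	/* write a zero offset, then read back the first 8 bytes so the
	 * EDID header can be checked below
	 */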
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &out,
},
{
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = 8,
.buf = buf,
}
};
/* on hw with routers, select right port */
if (amdgpu_connector->router.ddc_valid)
amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
if (use_aux)
ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
else
ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
/* Probe also for valid EDID header
* EDID header starts with:
* 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
* Only the first 6 bytes must be valid as
* drm_edid_block_valid() can fix the last 2 bytes
*/
if (drm_edid_header_is_valid(buf) < 6) {
/* Couldn't find an accessible EDID on this
* connector
*/
return false;
}
return true;
}
static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
unsigned int flags, unsigned int color,
struct drm_clip_rect *clips, unsigned int num_clips)
{
if (file)
return -ENOSYS;
return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
num_clips);
}
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
.dirty = amdgpu_dirtyfb
};
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
#if defined(CONFIG_DRM_AMD_DC)
/*
* if amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
* to avoid hang caused by placement of scanout BO in GTT on certain
* APUs. So force the BO placement to VRAM in case this architecture
* will not allow USWC mappings.
* Also, don't allow GTT domain if the BO doesn't have USWC flag set.
*/
if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
adev->dc_enabled &&
adev->mode_info.gpu_vm_support)
domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif
return domain;
}
static const struct drm_format_info dcc_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
.cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
.cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
static const struct drm_format_info dcc_retile_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
.cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
.has_alpha = true, },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
.cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
int num_formats, u32 format)
{
int i;
for (i = 0; i < num_formats; i++) {
if (formats[i].format == format)
return &formats[i];
}
return NULL;
}
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
if (!IS_AMD_FMT_MOD(modifier))
return NULL;
if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
return lookup_format_info(dcc_retile_formats,
ARRAY_SIZE(dcc_retile_formats),
format);
if (AMD_FMT_MOD_GET(DCC, modifier))
return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
format);
/* returning NULL will cause the default format structs to be used. */
return NULL;
}
/*
* Tries to extract the renderable DCC offset from the opaque metadata attached
* to the buffer.
*/
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
struct drm_gem_object *obj,
uint64_t *offset)
{
struct amdgpu_bo *rbo;
int r = 0;
uint32_t metadata[10]; /* Something that fits a descriptor + header. */
uint32_t size;
rbo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
/* Don't show error message when returning -ERESTARTSYS */
if (r != -ERESTARTSYS)
DRM_ERROR("Unable to reserve buffer: %d\n", r);
return r;
}
r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
amdgpu_bo_unreserve(rbo);
if (r)
return r;
/*
* The first word is the metadata version, and we need space for at least
* the version + pci vendor+device id + 8 words for a descriptor.
*/
if (size < 40 || metadata[0] != 1)
return -EINVAL;
if (adev->family >= AMDGPU_FAMILY_NV) {
/* resource word 6/7 META_DATA_ADDRESS{_LO} */
*offset = ((u64)metadata[9] << 16u) |
((metadata[8] & 0xFF000000u) >> 16);
} else {
/* resource word 5/7 META_DATA_ADDRESS */
*offset = ((u64)metadata[9] << 8u) |
((u64)(metadata[7] & 0x1FE0000u) << 23);
}
return 0;
}
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
uint64_t modifier = 0;
int num_pipes = 0;
int num_pkrs = 0;
num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes;
if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
modifier = DRM_FORMAT_MOD_LINEAR;
} else {
int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
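		/* swizzle modes 16 and up are the _T/_X variants that use XOR-based addressing */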
bool has_xor = swizzle >= 16;
int block_size_bits;
int version;
int pipe_xor_bits = 0;
int bank_xor_bits = 0;
int packers = 0;
int rb = 0;
int pipes = ilog2(num_pipes);
uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);
switch (swizzle >> 2) {
case 0: /* 256B */
block_size_bits = 8;
break;
case 1: /* 4KiB */
case 5: /* 4KiB _X */
block_size_bits = 12;
break;
case 2: /* 64KiB */
case 4: /* 64 KiB _T */
case 6: /* 64 KiB _X */
block_size_bits = 16;
break;
case 7: /* 256 KiB */
block_size_bits = 18;
break;
default:
/* RESERVED or VAR */
return -EINVAL;
}
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
version = AMD_FMT_MOD_TILE_VER_GFX11;
else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
version = AMD_FMT_MOD_TILE_VER_GFX10;
else
version = AMD_FMT_MOD_TILE_VER_GFX9;
switch (swizzle & 3) {
case 0: /* Z microtiling */
return -EINVAL;
case 1: /* S microtiling */
if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
if (!has_xor)
version = AMD_FMT_MOD_TILE_VER_GFX9;
}
break;
case 2:
if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) {
if (!has_xor && afb->base.format->cpp[0] != 4)
version = AMD_FMT_MOD_TILE_VER_GFX9;
}
break;
case 3:
break;
}
if (has_xor) {
if (num_pipes == num_pkrs && num_pkrs == 0) {
DRM_ERROR("invalid number of pipes and packers\n");
return -EINVAL;
}
switch (version) {
case AMD_FMT_MOD_TILE_VER_GFX11:
pipe_xor_bits = min(block_size_bits - 8, pipes);
packers = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
break;
case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
pipe_xor_bits = min(block_size_bits - 8, pipes);
packers = min(block_size_bits - 8 - pipe_xor_bits,
ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
break;
case AMD_FMT_MOD_TILE_VER_GFX10:
pipe_xor_bits = min(block_size_bits - 8, pipes);
break;
case AMD_FMT_MOD_TILE_VER_GFX9:
rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
pipe_xor_bits = min(block_size_bits - 8, pipes +
ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
break;
}
}
modifier = AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
AMD_FMT_MOD_SET(TILE_VERSION, version) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, packers);
if (dcc_offset != 0) {
bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
const struct drm_format_info *format_info;
u64 render_dcc_offset;
/* Enable constant encode on RAVEN2 and later. */
bool dcc_constant_encode = (adev->asic_type > CHIP_RAVEN ||
(adev->asic_type == CHIP_RAVEN &&
adev->external_rev_id >= 0x81)) &&
adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0);
int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
AMD_FMT_MOD_DCC_BLOCK_256B;
modifier |= AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);
afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
afb->base.pitches[1] =
AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;
/*
* If the userspace driver uses retiling the tiling flags do not contain
* info on the renderable DCC buffer. Luckily the opaque metadata contains
* the info so we can try to extract it. The kernel does not use this info
* but we should convert it to a modifier plane for getfb2, so the
* userspace driver that gets it doesn't have to juggle around another DCC
* plane internally.
*/
if (extract_render_dcc_offset(adev, afb->base.obj[0],
&render_dcc_offset) == 0 &&
render_dcc_offset != 0 &&
render_dcc_offset != afb->base.offsets[1] &&
render_dcc_offset < UINT_MAX) {
uint32_t dcc_block_bits; /* of base surface data */
modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
afb->base.offsets[2] = render_dcc_offset;
if (adev->family >= AMDGPU_FAMILY_NV) {
int extra_pipe = 0;
if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) &&
pipes == packers && pipes > 1)
extra_pipe = 1;
dcc_block_bits = max(20, 16 + pipes + extra_pipe);
} else {
modifier |= AMD_FMT_MOD_SET(RB, rb) |
AMD_FMT_MOD_SET(PIPE, pipes);
dcc_block_bits = max(20, 18 + rb);
}
dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
afb->base.pitches[2] = ALIGN(afb->base.width,
1u << ((dcc_block_bits + 1) / 2));
}
format_info = amdgpu_lookup_format_info(afb->base.format->format,
modifier);
if (!format_info)
return -EINVAL;
afb->base.format = format_info;
}
}
afb->base.modifier = modifier;
afb->base.flags |= DRM_MODE_FB_MODIFIERS;
return 0;
}
/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
u64 micro_tile_mode;
/* Zero swizzle mode means linear */
if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
return 0;
micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
switch (micro_tile_mode) {
case 0: /* DISPLAY */
case 3: /* RENDER */
return 0;
default:
drm_dbg_kms(afb->base.dev,
"Micro tile mode %llu not supported for scanout\n",
micro_tile_mode);
return -EINVAL;
}
}
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
unsigned int *width, unsigned int *height)
{
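	/*
	 * Split the block's pixel capacity (block size / cpp) into a roughly
	 * square width x height; width gets the extra bit when the pixel
	 * count is an odd power of two.
	 */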
unsigned int cpp_log2 = ilog2(cpp);
unsigned int pixel_log2 = block_log2 - cpp_log2;
unsigned int width_log2 = (pixel_log2 + 1) / 2;
unsigned int height_log2 = pixel_log2 - width_log2;
*width = 1 << width_log2;
*height = 1 << height_log2;
}
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
bool pipe_aligned)
{
unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
switch (ver) {
case AMD_FMT_MOD_TILE_VER_GFX9: {
/*
* TODO: for pipe aligned we may need to check the alignment of the
* total size of the surface, which may need to be bigger than the
* natural alignment due to some HW workarounds
*/
return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
}
case AMD_FMT_MOD_TILE_VER_GFX10:
case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
case AMD_FMT_MOD_TILE_VER_GFX11: {
int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
if (ver >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
++pipes_log2;
return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
}
default:
return 0;
}
}
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
const struct drm_format_info *format,
unsigned int block_width, unsigned int block_height,
unsigned int block_size_log2)
{
unsigned int width = rfb->base.width /
((plane && plane < format->num_planes) ? format->hsub : 1);
unsigned int height = rfb->base.height /
((plane && plane < format->num_planes) ? format->vsub : 1);
unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
unsigned int block_pitch = block_width * cpp;
unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
unsigned int block_size = 1 << block_size_log2;
uint64_t size;
if (rfb->base.pitches[plane] % block_pitch) {
drm_dbg_kms(rfb->base.dev,
"pitch %d for plane %d is not a multiple of block pitch %d\n",
rfb->base.pitches[plane], plane, block_pitch);
return -EINVAL;
}
if (rfb->base.pitches[plane] < min_pitch) {
drm_dbg_kms(rfb->base.dev,
"pitch %d for plane %d is less than minimum pitch %d\n",
rfb->base.pitches[plane], plane, min_pitch);
return -EINVAL;
}
/* Force at least natural alignment. */
if (rfb->base.offsets[plane] % block_size) {
drm_dbg_kms(rfb->base.dev,
"offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
rfb->base.offsets[plane], plane, block_size);
return -EINVAL;
}
size = rfb->base.offsets[plane] +
(uint64_t)rfb->base.pitches[plane] / block_pitch *
block_size * DIV_ROUND_UP(height, block_height);
if (rfb->base.obj[0]->size < size) {
drm_dbg_kms(rfb->base.dev,
"BO size 0x%zx is less than 0x%llx required for plane %d\n",
rfb->base.obj[0]->size, size, plane);
return -EINVAL;
}
return 0;
}
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
uint64_t modifier = rfb->base.modifier;
int ret;
unsigned int i, block_width, block_height, block_size_log2;
if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
return 0;
for (i = 0; i < format_info->num_planes; ++i) {
if (modifier == DRM_FORMAT_MOD_LINEAR) {
block_width = 256 / format_info->cpp[i];
block_height = 1;
block_size_log2 = 8;
} else {
int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
switch ((swizzle & ~3) + 1) {
case DC_SW_256B_S:
block_size_log2 = 8;
break;
case DC_SW_4KB_S:
case DC_SW_4KB_S_X:
block_size_log2 = 12;
break;
case DC_SW_64KB_S:
case DC_SW_64KB_S_T:
case DC_SW_64KB_S_X:
block_size_log2 = 16;
break;
case DC_SW_VAR_S_X:
block_size_log2 = 18;
break;
default:
drm_dbg_kms(rfb->base.dev,
"Swizzle mode with unknown block size: %d\n", swizzle);
return -EINVAL;
}
get_block_dimensions(block_size_log2, format_info->cpp[i],
&block_width, &block_height);
}
ret = amdgpu_display_verify_plane(rfb, i, format_info,
block_width, block_height, block_size_log2);
if (ret)
return ret;
}
if (AMD_FMT_MOD_GET(DCC, modifier)) {
if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
block_size_log2 = get_dcc_block_size(modifier, false, false);
get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
&block_width, &block_height);
ret = amdgpu_display_verify_plane(rfb, i, format_info,
block_width, block_height,
block_size_log2);
if (ret)
return ret;
++i;
block_size_log2 = get_dcc_block_size(modifier, true, true);
} else {
bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
}
get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
&block_width, &block_height);
ret = amdgpu_display_verify_plane(rfb, i, format_info,
block_width, block_height, block_size_log2);
if (ret)
return ret;
}
return 0;
}
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags, bool *tmz_surface)
{
struct amdgpu_bo *rbo;
int r;
if (!amdgpu_fb) {
*tiling_flags = 0;
*tmz_surface = false;
return 0;
}
rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
/* Don't show error message when returning -ERESTARTSYS */
if (r != -ERESTARTSYS)
DRM_ERROR("Unable to reserve buffer: %d\n", r);
return r;
}
if (tiling_flags)
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
if (tmz_surface)
*tmz_surface = amdgpu_bo_encrypted(rbo);
amdgpu_bo_unreserve(rbo);
return r;
}
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
rfb->base.obj[0] = obj;
drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
/* Verify that the modifier is supported. */
if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg_kms(dev,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
ret = -EINVAL;
goto err;
}
ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
goto err;
if (drm_drv_uses_atomic_modeset(dev))
ret = drm_framebuffer_init(dev, &rfb->base,
&amdgpu_fb_funcs_atomic);
else
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
return 0;
err:
drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
rfb->base.obj[0] = NULL;
return ret;
}
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int ret, i;
/*
* This needs to happen before modifier conversion as that might change
* the number of planes.
*/
for (i = 1; i < rfb->base.format->num_planes; ++i) {
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
i, mode_cmd->handles[0], mode_cmd->handles[i]);
ret = -EINVAL;
return ret;
}
}
ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
if (ret)
return ret;
if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);
if (ret)
return ret;
}
if (!dev->mode_config.fb_modifiers_not_supported &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
ret = convert_tiling_flags_to_modifier(rfb);
if (ret) {
drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
rfb->tiling_flags);
return ret;
}
}
ret = amdgpu_display_verify_sizes(rfb);
if (ret)
return ret;
for (i = 0; i < rfb->base.format->num_planes; ++i) {
drm_gem_object_get(rfb->base.obj[0]);
rfb->base.obj[i] = rfb->base.obj[0];
}
return 0;
}
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct amdgpu_framebuffer *amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
drm_dbg_kms(dev,
"No GEM object associated to handle 0x%08X, can't create framebuffer\n",
mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
bo = gem_to_amdgpu_bo(obj);
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
}
ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
drm_gem_object_put(obj);
return &amdgpu_fb->base;
}
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
};
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
{ UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
{ AMDGPU_AUDIO_DISABLE, "off" },
{ AMDGPU_AUDIO_ENABLE, "on" },
{ AMDGPU_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
{ AMDGPU_FMT_DITHER_DISABLE, "off" },
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
adev->mode_info.coherent_mode_property =
drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
if (!adev->mode_info.coherent_mode_property)
return -ENOMEM;
adev->mode_info.load_detect_property =
drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
if (!adev->mode_info.load_detect_property)
return -ENOMEM;
drm_mode_create_scaling_mode_property(adev_to_drm(adev));
sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
adev->mode_info.underscan_property =
drm_property_create_enum(adev_to_drm(adev), 0,
"underscan",
amdgpu_underscan_enum_list, sz);
adev->mode_info.underscan_hborder_property =
drm_property_create_range(adev_to_drm(adev), 0,
"underscan hborder", 0, 128);
if (!adev->mode_info.underscan_hborder_property)
return -ENOMEM;
adev->mode_info.underscan_vborder_property =
drm_property_create_range(adev_to_drm(adev), 0,
"underscan vborder", 0, 128);
if (!adev->mode_info.underscan_vborder_property)
return -ENOMEM;
sz = ARRAY_SIZE(amdgpu_audio_enum_list);
adev->mode_info.audio_property =
drm_property_create_enum(adev_to_drm(adev), 0,
"audio",
amdgpu_audio_enum_list, sz);
sz = ARRAY_SIZE(amdgpu_dither_enum_list);
adev->mode_info.dither_property =
drm_property_create_enum(adev_to_drm(adev), 0,
"dither",
amdgpu_dither_enum_list, sz);
if (adev->dc_enabled) {
adev->mode_info.abm_level_property =
drm_property_create_range(adev_to_drm(adev), 0,
"abm level", 0, 4);
if (!adev->mode_info.abm_level_property)
return -ENOMEM;
}
return 0;
}
void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
/* adjustment options for the display watermarks */
if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
adev->mode_info.disp_priority = 0;
else
adev->mode_info.disp_priority = amdgpu_disp_priority;
}
static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
/* try and guess if this is a tv or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
(mode->vdisplay == 576) || /* 576p */
(mode->vdisplay == 720) || /* 720p */
(mode->vdisplay == 1080)) /* 1080p */
return true;
else
return false;
}
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_encoder *amdgpu_encoder;
struct drm_connector *connector;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
amdgpu_crtc->h_border = 0;
amdgpu_crtc->v_border = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
amdgpu_encoder = to_amdgpu_encoder(encoder);
connector = amdgpu_get_connector_for_encoder(encoder);
/* set scaling */
if (amdgpu_encoder->rmx_type == RMX_OFF)
amdgpu_crtc->rmx_type = RMX_OFF;
else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
else
amdgpu_crtc->rmx_type = RMX_OFF;
/* copy native mode */
memcpy(&amdgpu_crtc->native_mode,
&amdgpu_encoder->native_mode,
sizeof(struct drm_display_mode));
src_v = crtc->mode.vdisplay;
dst_v = amdgpu_crtc->native_mode.vdisplay;
src_h = crtc->mode.hdisplay;
dst_h = amdgpu_crtc->native_mode.hdisplay;
/* fix up for overscan on hdmi */
if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
connector->display_info.is_hdmi &&
amdgpu_display_is_hdtv_mode(mode)))) {
if (amdgpu_encoder->underscan_hborder != 0)
amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
else
amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
if (amdgpu_encoder->underscan_vborder != 0)
amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
else
amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
amdgpu_crtc->rmx_type = RMX_FULL;
src_v = crtc->mode.vdisplay;
dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
src_h = crtc->mode.hdisplay;
dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
}
}
if (amdgpu_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
a.full = dfixed_const(src_v);
b.full = dfixed_const(dst_v);
amdgpu_crtc->vsc.full = dfixed_div(a, b);
a.full = dfixed_const(src_h);
b.full = dfixed_const(dst_h);
amdgpu_crtc->hsc.full = dfixed_div(a, b);
} else {
amdgpu_crtc->vsc.full = dfixed_const(1);
amdgpu_crtc->hsc.full = dfixed_const(1);
}
return true;
}
/*
* Retrieve current video scanout position of crtc on a given gpu, and
* an optional accurate timestamp of when query happened.
*
* \param dev Device to query.
* \param pipe Crtc to query.
* \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
* For driver-internal use it also supports these flags:
*
* USE_REAL_VBLANKSTART to use the real start of vblank instead
* of a fudged earlier start of vblank.
*
* GET_DISTANCE_TO_VBLANKSTART to return distance to the
* fudged earlier start of vblank in *vpos and the distance
* to true start of vblank in *hpos.
*
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
* scanout position query. Can be NULL to skip timestamp.
* \param *etime Target location for timestamp taken immediately after
* scanout position query. Can be NULL to skip timestamp.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
* of scanlines to go until end of vblank, e.g., -1 means "one scanline
* until start of active scanout / end of vblank."
*
* \return Flags, or'ed together as follows:
*
* DRM_SCANOUTPOS_VALID = Query successful.
* DRM_SCANOUTPOS_INVBL = Inside vblank.
* DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
* this flag means that returned position may be offset by a constant but
* unknown small number of scanlines wrt. real scanout position.
*
*/
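/*
 * For example, with the fallback vblank bounds below (vbl_start ==
 * crtc_vdisplay == 1080, vbl_end == 0), crtc_vtotal == 1125 and the
 * lead-line fudge ignored, a raw hardware position of 1100 lies inside
 * vblank and is returned as 1100 - 1125 == -25, i.e. 25 scanlines to go
 * until the end of vblank.
 */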
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
unsigned int pipe, unsigned int flags, int *vpos,
int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
u32 vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
struct amdgpu_device *adev = drm_to_adev(dev);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
/* Get optional system timestamp before query. */
if (stime)
*stime = ktime_get();
if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
ret |= DRM_SCANOUTPOS_VALID;
/* Get optional system timestamp after query. */
if (etime)
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
/* Decode into vertical and horizontal scanout position. */
*vpos = position & 0x1fff;
*hpos = (position >> 16) & 0x1fff;
/* Valid vblank area boundaries from gpu retrieved? */
if (vbl > 0) {
/* Yes: Decode. */
ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
} else {
/* No: Fake something reasonable which gives at least ok results. */
vbl_start = mode->crtc_vdisplay;
vbl_end = 0;
}
/* Called from driver internal vblank counter query code? */
if (flags & GET_DISTANCE_TO_VBLANKSTART) {
/* Caller wants distance from real vbl_start in *hpos */
*hpos = *vpos - vbl_start;
}
/* Fudge vblank to start a few scanlines earlier to handle the
* problem that vblank irqs fire a few scanlines before start
* of vblank. Some driver internal callers need the true vblank
* start to be used and signal this via the USE_REAL_VBLANKSTART flag.
*
* The cause of the "early" vblank irq is that the irq is triggered
* by the line buffer logic when the line buffer read position enters
* the vblank, whereas our crtc scanout position naturally lags the
* line buffer read position.
*/
if (!(flags & USE_REAL_VBLANKSTART))
vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
/* Test scanout position against vblank region. */
if ((*vpos < vbl_start) && (*vpos >= vbl_end))
in_vbl = false;
/* In vblank? */
if (in_vbl)
ret |= DRM_SCANOUTPOS_IN_VBLANK;
/* Called from driver internal vblank counter query code? */
if (flags & GET_DISTANCE_TO_VBLANKSTART) {
/* Caller wants distance from fudged earlier vbl_start */
*vpos -= vbl_start;
return ret;
}
/* Check if inside vblank area and apply corrective offsets:
* vpos will then be >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
* start of scanout.
*/
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = mode->crtc_vtotal;
/* With variable refresh rate displays the vpos can exceed
* the vtotal value. Clamp to 0 to return -vbl_end instead
* of guessing the remaining number of lines until scanout.
*/
*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
}
/* Correct for shifted end of vbl at vbl_end. */
*vpos = *vpos - vbl_end;
return ret;
}
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
return AMDGPU_CRTC_IRQ_NONE;
switch (crtc) {
case 0:
return AMDGPU_CRTC_IRQ_VBLANK1;
case 1:
return AMDGPU_CRTC_IRQ_VBLANK2;
case 2:
return AMDGPU_CRTC_IRQ_VBLANK3;
case 3:
return AMDGPU_CRTC_IRQ_VBLANK4;
case 4:
return AMDGPU_CRTC_IRQ_VBLANK5;
case 5:
return AMDGPU_CRTC_IRQ_VBLANK6;
default:
return AMDGPU_CRTC_IRQ_NONE;
}
}
bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
bool in_vblank_irq, int *vpos,
int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
stime, etime, mode);
}
static bool
amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_fb_helper *fb_helper = dev->fb_helper;
if (!fb_helper || !fb_helper->buffer)
return false;
if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
return false;
return true;
}
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
int r;
drm_kms_helper_poll_disable(dev);
/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
drm_helper_connector_dpms(connector,
DRM_MODE_DPMS_OFF);
drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
}
if (!fb || !fb->obj[0])
continue;
robj = gem_to_amdgpu_bo(fb->obj[0]);
if (!amdgpu_display_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
amdgpu_bo_unreserve(robj);
}
}
}
return 0;
}
int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct drm_crtc *crtc;
int r;
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
amdgpu_bo_unreserve(aobj);
}
}
}
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
drm_helper_connector_dpms(connector,
DRM_MODE_DPMS_ON);
drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
drm_kms_helper_poll_enable(dev);
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_display.c |
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
/*
* GART
* The GART (Graphics Aperture Remapping Table) is an aperture
* in the GPU's address space. System pages can be mapped into
* the aperture and look like contiguous pages from the GPU's
* perspective. A page table maps the pages in the aperture
* to the actual backing pages in system memory.
*
* Radeon GPUs support both an internal GART, as described above,
* and AGP. AGP works similarly, but the GART table is configured
* and maintained by the northbridge rather than the driver.
* Radeon hw has a separate AGP aperture that is programmed to
* point to the AGP aperture provided by the northbridge and the
* requests are passed through to the northbridge aperture.
* Both AGP and internal GART can be used at the same time, however
* that is not currently supported by the driver.
*
* This file handles the common internal GART management.
*/
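/*
 * To put the table size in perspective: a purely illustrative 512MB GART
 * aperture with 4KB GPU pages needs 131072 page table entries of 8 bytes
 * each, i.e. a 1MB GART table.
 */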
/*
* Common GART table functions.
*/
/**
* amdgpu_gart_dummy_page_init - init dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
* Allocate the dummy page used by the driver (all asics).
* This dummy page is used by the driver as a filler for gart entries
* when pages are taken out of the GART
* Returns 0 on success, -ENOMEM on failure.
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
struct page *dummy_page = ttm_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
adev->dummy_page_addr = 0;
return -ENOMEM;
}
return 0;
}
/**
* amdgpu_gart_dummy_page_fini - free dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
* Frees the dummy page used by the driver (all asics).
*/
void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
if (!adev->dummy_page_addr)
return;
dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
adev->dummy_page_addr = 0;
}
/**
* amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
*
* @adev: amdgpu_device pointer
*
* Allocate system memory for GART page table for ASICs that don't have
* dedicated VRAM.
* Returns 0 for success, error for failure.
*/
int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
{
unsigned int order = get_order(adev->gart.table_size);
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
struct amdgpu_bo *bo = NULL;
struct sg_table *sg = NULL;
struct amdgpu_bo_param bp;
dma_addr_t dma_addr;
struct page *p;
int ret;
if (adev->gart.bo != NULL)
return 0;
p = alloc_pages(gfp_flags, order);
if (!p)
return -ENOMEM;
/* If the hardware does not support UTCL2 snooping of the CPU caches
* then set_memory_wc() could be used as a workaround to mark the pages
* as write combine memory.
*/
dma_addr = dma_map_page(&adev->pdev->dev, p, 0, adev->gart.table_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(&adev->pdev->dev, dma_addr)) {
dev_err(&adev->pdev->dev, "Failed to DMA MAP the GART BO page\n");
__free_pages(p, order);
p = NULL;
return -EFAULT;
}
dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr);
/* Create SG table */
sg = kmalloc(sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto error;
}
ret = sg_alloc_table(sg, 1, GFP_KERNEL);
if (ret)
goto error;
sg_dma_address(sg->sgl) = dma_addr;
sg->sgl->length = adev->gart.table_size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->sgl->dma_length = adev->gart.table_size;
#endif
/* Create SG BO */
memset(&bp, 0, sizeof(bp));
bp.size = adev->gart.table_size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_CPU;
bp.type = ttm_bo_type_sg;
bp.resv = NULL;
bp.bo_ptr_size = sizeof(struct amdgpu_bo);
bp.flags = 0;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret)
goto error;
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
ret = amdgpu_bo_reserve(bo, true);
if (ret) {
dev_err(adev->dev, "(%d) failed to reserve bo for GART system bo\n", ret);
goto error;
}
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
WARN(ret, "Pinning the GART table failed");
if (ret)
goto error_resv;
adev->gart.bo = bo;
adev->gart.ptr = page_to_virt(p);
/* Make GART table accessible in VMID0 */
ret = amdgpu_ttm_alloc_gart(&adev->gart.bo->tbo);
if (ret)
amdgpu_gart_table_ram_free(adev);
amdgpu_bo_unreserve(bo);
return 0;
error_resv:
amdgpu_bo_unreserve(bo);
error:
amdgpu_bo_unref(&bo);
if (sg) {
sg_free_table(sg);
kfree(sg);
}
__free_pages(p, order);
return ret;
}
/**
* amdgpu_gart_table_ram_free - free gart page table system ram
*
* @adev: amdgpu_device pointer
*
* Free the system memory used for the GART page table on ASICs that don't
* have dedicated VRAM.
*/
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
{
unsigned int order = get_order(adev->gart.table_size);
struct sg_table *sg = adev->gart.bo->tbo.sg;
struct page *p;
int ret;
ret = amdgpu_bo_reserve(adev->gart.bo, false);
if (!ret) {
amdgpu_bo_unpin(adev->gart.bo);
amdgpu_bo_unreserve(adev->gart.bo);
}
amdgpu_bo_unref(&adev->gart.bo);
sg_free_table(sg);
kfree(sg);
p = virt_to_page(adev->gart.ptr);
__free_pages(p, order);
adev->gart.ptr = NULL;
}
/**
* amdgpu_gart_table_vram_alloc - allocate vram for gart page table
*
* @adev: amdgpu_device pointer
*
* Allocate video memory for GART page table
* (pcie r4xx, r5xx+). These asics require the
* gart table to be in video memory.
* Returns 0 for success, error for failure.
*/
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
if (adev->gart.bo != NULL)
return 0;
return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
NULL, (void *)&adev->gart.ptr);
}
/**
* amdgpu_gart_table_vram_free - free gart page table vram
*
* @adev: amdgpu_device pointer
*
* Free the video memory used for the GART page table
* (pcie r4xx, r5xx+). These asics require the gart table to
* be in video memory.
*/
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
}
/*
* Common gart functions.
*/
/**
* amdgpu_gart_unbind - unbind pages from the gart page table
*
* @adev: amdgpu_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to unbind
*
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
unsigned p;
int i, j;
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
uint64_t flags = 0;
int idx;
if (!adev->gart.ptr)
return;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
mb();
amdgpu_device_flush_hdp(adev, NULL);
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
drm_dev_exit(idx);
}
/**
* amdgpu_gart_map - map dma_addresses into GART entries
*
* @adev: amdgpu_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
* @dma_addr: DMA addresses of pages
* @flags: page table entry flags
* @dst: CPU address of the gart table
*
* Map the dma_addresses into GART entries (all asics).
*/
void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags,
void *dst)
{
uint64_t page_base;
unsigned i, j, t;
int idx;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
for (i = 0; i < pages; i++) {
page_base = dma_addr[i];
for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
drm_dev_exit(idx);
}
/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
* @dma_addr: DMA addresses of pages
* @flags: page table entry flags
*
* Binds the requested pages to the gart page table
* (all asics).
*/
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr,
uint64_t flags)
{
if (!adev->gart.ptr)
return;
amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
}
/**
* amdgpu_gart_invalidate_tlb - invalidate gart TLB
*
* @adev: amdgpu device driver pointer
*
* Invalidate the GART TLB, which can be used as a way to flush GART changes
*
*/
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
{
int i;
if (!adev->gart.ptr)
return;
mb();
amdgpu_device_flush_hdp(adev, NULL);
for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}
/**
* amdgpu_gart_init - init the driver info for managing the gart
*
* @adev: amdgpu_device pointer
*
* Allocate the dummy page and init the gart driver info (all asics).
* Returns 0 for success, error for failure.
*/
int amdgpu_gart_init(struct amdgpu_device *adev)
{
int r;
if (adev->dummy_page_addr)
return 0;
/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
r = amdgpu_gart_dummy_page_init(adev);
if (r)
return r;
/* Compute table size */
adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "hdp_v4_0.h"
#include "amdgpu_ras.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL 0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) ||
adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2))
return;
if (!ring || !ring->funcs->emit_wreg)
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
err_data->ue_count = 0;
err_data->ce_count = 0;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
return;
/* HDP SRAM errors are uncorrectable ones (i.e. fatal errors) */
err_data->ue_count += RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
};
static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
return;
if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0))
WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0);
else
/* read back hdp ras counter to reset it to 0 */
RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}
static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) ||
adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) ||
adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) ||
adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
else
data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
if (def != data)
WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
} else {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
else
data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
if (def != data)
WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
}
}
static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
int data;
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_HDP_LS;
}
static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[HDP_HWIP][0]) {
case IP_VERSION(4, 2, 1):
WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
break;
default:
break;
}
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, READ_BUFFER_WATERMARK, 2);
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
}
struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
.query_ras_error_count = hdp_v4_0_query_ras_error_count,
.reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
};
struct amdgpu_hdp_ras hdp_v4_0_ras = {
.ras_block = {
.hw_ops = &hdp_v4_0_ras_hw_ops,
},
};
const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
.flush_hdp = hdp_v4_0_flush_hdp,
.invalidate_hdp = hdp_v4_0_invalidate_hdp,
.update_clock_gating = hdp_v4_0_update_clock_gating,
.get_clock_gating_state = hdp_v4_0_get_clockgating_state,
.init_registers = hdp_v4_0_init_registers,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c |
/*
* Copyright 2014-2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "gfx_v9_0.h"
#include "amdgpu_amdkfd_gfx_v9.h"
#include <uapi/linux/kfd_ioctl.h>
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
RESET_WAVES,
SAVE_WAVES
};
static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid, uint32_t inst)
{
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, mec, pipe, queue, vmid, GET_INST(GC, inst));
}
static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst)
{
soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
mutex_unlock(&adev->srbm_mutex);
}
void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id, uint32_t inst)
{
uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
kgd_gfx_v9_lock_srbm(adev, mec, pipe, queue_id, 0, inst);
}
uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id)
{
unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
queue_id;
return 1ull << bit;
}
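/*
 * kgd_gfx_v9_get_queue_mask() above, assuming the usual 8 queues per
 * pipe: pipe 2 / queue 3 maps to bit 2 * 8 + 3 == 19, i.e. a queue mask
 * of 0x80000.
 */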
void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst)
{
kgd_gfx_v9_unlock_srbm(adev, inst);
}
void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
uint32_t sh_mem_bases, uint32_t inst)
{
kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases);
/* APE1 no longer exists on GFX9 */
kgd_gfx_v9_unlock_srbm(adev, inst);
}
int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
unsigned int vmid, uint32_t inst)
{
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
* a mapping is in progress or because a mapping finished
* and the SW cleared it.
* So the protocol is to always wait & clear.
*/
uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
ATC_VMID0_PASID_MAPPING__VALID_MASK;
/*
* need to do this twice, once for gfx and once for mmhub
* for ATC add 16 to VMID for mmhub, for IH different registers.
* ATC_VMID0..15 registers are separate from ATC_VMID16..31.
*/
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
pasid_mapping);
while (!(RREG32(SOC15_REG_OFFSET(
ATHUB, 0,
mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
(1U << vmid)))
cpu_relax();
WREG32(SOC15_REG_OFFSET(ATHUB, 0,
mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
1U << vmid);
/* Mapping vmid to pasid also for IH block */
WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
pasid_mapping);
WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
pasid_mapping);
while (!(RREG32(SOC15_REG_OFFSET(
ATHUB, 0,
mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
(1U << (vmid + 16))))
cpu_relax();
WREG32(SOC15_REG_OFFSET(ATHUB, 0,
mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
1U << (vmid + 16));
/* Mapping vmid to pasid also for IH block */
WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
pasid_mapping);
return 0;
}
/* TODO - RING0 form of field is obsolete, seems to date back to SI
* but still works
*/
int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t inst)
{
uint32_t mec;
uint32_t pipe;
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst);
WREG32_SOC15(GC, GET_INST(GC, inst), mmCPC_INT_CNTL,
CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
kgd_gfx_v9_unlock_srbm(adev, inst);
return 0;
}
static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
unsigned int engine_id,
unsigned int queue_id)
{
uint32_t sdma_engine_reg_base = 0;
uint32_t sdma_rlc_reg_offset;
switch (engine_id) {
default:
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
case 1:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
break;
}
sdma_rlc_reg_offset = sdma_engine_reg_base
+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
queue_id, sdma_rlc_reg_offset);
return sdma_rlc_reg_offset;
}
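/*
 * E.g. engine 1 / queue 2 in get_sdma_rlc_reg_offset() above resolves to
 * the SDMA1 register base plus twice the per-queue stride
 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL), so every RLC queue
 * register below can be reached with this single offset.
 */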
static inline struct v9_mqd *get_mqd(void *mqd)
{
return (struct v9_mqd *)mqd;
}
static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct v9_sdma_mqd *)mqd;
}
int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr, uint32_t wptr_shift,
uint32_t wptr_mask, struct mm_struct *mm,
uint32_t inst)
{
struct v9_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, hqd_base, data;
m = get_mqd(mqd);
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
mqd_hqd = &m->cp_mqd_base_addr_lo;
hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
for (reg = hqd_base;
reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL),
data);
if (wptr) {
/* Don't read wptr with get_user because the user
* context may not be accessible (if this function
* runs in a work queue). Instead trigger a one-shot
* polling read from memory in the CP. This assumes
* that wptr is GPU-accessible in the queue's VMID via
* ATC or SVM. WPTR==RPTR before starting the poll so
* the CP starts fetching new commands from the right
* place.
*
* Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
* tricky. Assume that the queue didn't overflow. The
* number of valid bits in the 32-bit RPTR depends on
* the queue size. The remaining bits are taken from
* the saved 64-bit WPTR. If the WPTR wrapped, add the
* queue size.
*/
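/*
 * For instance, assuming queue_size == 0x1000: with a saved 32-bit rptr
 * of 0xff0 and a saved wptr_lo of 0x12340010, the low bits of wptr_lo
 * (0x010) are below the rptr offset, so the ring wrapped and queue_size
 * is added: 0xff0 + 0x1000 + 0x12340000 == 0x12341ff0 becomes the lower
 * half of the guessed wptr, with cp_hqd_pq_wptr_hi in the upper half.
 */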
uint32_t queue_size =
2 << REG_GET_FIELD(m->cp_hqd_pq_control,
CP_HQD_PQ_CONTROL, QUEUE_SIZE);
uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
guessed_wptr += queue_size;
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO),
lower_32_bits(guessed_wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI),
upper_32_bits(guessed_wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR),
lower_32_bits((uintptr_t)wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
}
/* Start the EOP fetcher */
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR),
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data);
kgd_gfx_v9_release_queue(adev, inst);
return 0;
}
int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off, uint32_t inst)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring;
struct v9_mqd *m;
uint32_t mec, pipe;
int r;
m = get_mqd(mqd);
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
spin_lock(&adev->gfx.kiq[inst].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
goto out_unlock;
}
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
PACKET3_MAP_QUEUES_QUEUE(queue_id) |
PACKET3_MAP_QUEUES_PIPE(pipe) |
PACKET3_MAP_QUEUES_ME((mec - 1)) |
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
amdgpu_ring_commit(kiq_ring);
out_unlock:
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
kgd_gfx_v9_release_queue(adev, inst);
return r;
}
int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
uint32_t pipe_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do { \
if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
break; \
(*dump)[i][0] = (addr) << 2; \
(*dump)[i++][1] = RREG32(addr); \
} while (0)
*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
for (reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
DUMP_REG(reg);
kgd_gfx_v9_release_queue(adev, inst);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
return 0;
}
static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
unsigned long end_jiffies;
uint32_t data;
uint64_t data64;
uint64_t __user *wptr64 = (uint64_t __user *)wptr;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
end_jiffies = msecs_to_jiffies(2000) + jiffies;
while (true) {
data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
}
usleep_range(500, 1000);
}
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
m->sdmax_rlcx_doorbell_offset);
data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
ENABLE, 1);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
m->sdmax_rlcx_rb_rptr);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
if (read_user_wptr(mm, wptr64, data64)) {
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
lower_32_bits(data64));
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
upper_32_bits(data64));
} else {
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
m->sdmax_rlcx_rb_rptr);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
m->sdmax_rlcx_rb_rptr_hi);
}
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdmax_rlcx_rb_base_hi);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdmax_rlcx_rb_rptr_addr_lo);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdmax_rlcx_rb_rptr_addr_hi);
data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
RB_ENABLE, 1);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
return 0;
}
static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
engine_id, queue_id);
uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
DUMP_REG(sdma_rlc_reg_offset + reg);
WARN_ON_ONCE(i != HQD_N_REGS);
*n_regs = i;
return 0;
}
bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id, uint32_t inst)
{
uint32_t act;
bool retval = false;
uint32_t low, high;
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
act = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
high = upper_32_bits(queue_address >> 8);
if (low == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE) &&
high == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI))
retval = true;
}
kgd_gfx_v9_release_queue(adev, inst);
return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
return false;
}
int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id, uint32_t inst)
{
enum hqd_dequeue_request_type type;
unsigned long end_jiffies;
uint32_t temp;
struct v9_mqd *m = get_mqd(mqd);
if (amdgpu_in_reset(adev))
return -EIO;
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15_RLC(GC, GET_INST(GC, inst), RLC_CP_SCHEDULERS, scheduler1, 0);
switch (reset_type) {
case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
type = DRAIN_PIPE;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
}
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type);
end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("cp queue preemption time out.\n");
kgd_gfx_v9_release_queue(adev, inst);
return -ETIME;
}
usleep_range(500, 1000);
}
kgd_gfx_v9_release_queue(adev, inst);
return 0;
}
static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
struct v9_sdma_mqd *m;
uint32_t sdma_rlc_reg_offset;
uint32_t temp;
unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
m->sdma_queue_id);
temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies)) {
pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
}
usleep_range(500, 1000);
}
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
m->sdmax_rlcx_rb_rptr_hi =
RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
return 0;
}
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
uint32_t value;
value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ vmid);
*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
uint32_t sq_cmd, uint32_t inst)
{
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, gfx_index_val);
WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_CMD, sq_cmd);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SH_BROADCAST_WRITES, 1);
data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
SE_BROADCAST_WRITES, 1);
WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, data);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
/*
* GFX9 helper for wave launch stall requirements on debug trap setting.
*
* vmid:
* Target VMID to stall/unstall.
*
* stall:
* 0-unstall wave launch (enable), 1-stall wave launch (disable).
* After wavefront launch has been stalled, allocated waves must drain from
* SPI in order for debug trap settings to take effect on those waves.
* This is roughly a ~96 clock cycle wait on SPI where a read on
* SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
* KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
*
* NOTE: We can afford to clear the entire STALL_VMID field on unstall
* because GFX9.4.1 cannot support multi-process debugging due to trap
* configuration and masking being limited to global scope. Always assume
* single process conditions.
*/
#define KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY 3
void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
uint32_t vmid,
bool stall)
{
int i;
uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
stall ? 1 << vmid : 0);
else
data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA,
stall ? 1 : 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
if (!stall)
return;
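/* Dummy reads give SPI time to drain already allocated waves (see the
* latency note above).
*/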
for (i = 0; i < KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
}
/*
* restore_dbg_registers is ignored here but is a general interface requirement
* for devices that support GFXOFF and where the RLC save/restore list
* does not support hw registers for debugging, i.e. the driver has to manually
* initialize the debug mode registers after it has disabled GFX off during the
* debug session.
*/
uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev,
bool restore_dbg_registers,
uint32_t vmid)
{
mutex_lock(&adev->grbm_idx_mutex);
kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
/*
* keep_trap_enabled is ignored here but is a general interface requirement
* for devices that support multi-process debugging where the performance
* overhead from trap temporary setup needs to be bypassed when the debug
* session has ended.
*/
uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev,
bool keep_trap_enabled,
uint32_t vmid)
{
mutex_lock(&adev->grbm_idx_mutex);
kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev,
uint32_t trap_override,
uint32_t *trap_mask_supported)
{
*trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH;
/* The SPI_GDBG_TRAP_MASK register is global and affects all
* processes. Only allow OR-ing the address-watch bit, since
* this only affects processes under the debugger. Other bits
* should stay 0 to avoid the debugger interfering with other
* processes.
*/
if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR)
return -EINVAL;
return 0;
}
uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev,
uint32_t vmid,
uint32_t trap_override,
uint32_t trap_mask_bits,
uint32_t trap_mask_request,
uint32_t *trap_mask_prev,
uint32_t kfd_dbg_cntl_prev)
{
uint32_t data, wave_cntl_prev;
mutex_lock(&adev->grbm_idx_mutex);
wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK));
*trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN);
trap_mask_bits = (trap_mask_bits & trap_mask_request) |
(*trap_mask_prev & ~trap_mask_request);
data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits);
data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
/* We need to preserve wave launch mode stall settings. */
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev,
uint8_t wave_launch_mode,
uint32_t vmid)
{
uint32_t data = 0;
bool is_mode_set = !!wave_launch_mode;
mutex_lock(&adev->grbm_idx_mutex);
kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
VMID_MASK, is_mode_set ? 1 << vmid : 0);
data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
MODE, is_mode_set ? wave_launch_mode : 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
uint64_t watch_address,
uint32_t watch_address_mask,
uint32_t watch_id,
uint32_t watch_mode,
uint32_t debug_vmid,
uint32_t inst)
{
uint32_t watch_address_high;
uint32_t watch_address_low;
uint32_t watch_address_cntl;
watch_address_cntl = 0;
watch_address_low = lower_32_bits(watch_address);
watch_address_high = upper_32_bits(watch_address) & 0xffff;
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
VMID,
debug_vmid);
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
MODE,
watch_mode);
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
MASK,
watch_address_mask >> 6);
/* Turning off this watch point until we set all the registers */
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
0);
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_cntl);
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_high);
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_low);
/* Enable the watch point */
watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
TCP_WATCH0_CNTL,
VALID,
1);
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_cntl);
return 0;
}
uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
uint32_t watch_id)
{
uint32_t watch_address_cntl;
watch_address_cntl = 0;
WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_cntl);
return 0;
}
/* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
* The values read are:
* ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
* atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
* wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
* gws_wait_time -- Wait Count for Global Wave Syncs.
* que_sleep_wait_time -- Wait Count for Dequeue Retry.
* sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
* sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
* deq_retry_wait_time -- Wait Count for Global Wave Syncs.
*/
void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t *wait_times,
uint32_t inst)
{
*wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
mmCP_IQ_WAIT_TIME2));
}
void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
uint32_t vmid, uint64_t page_table_base)
{
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
vmid);
return;
}
adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
{
mutex_lock(&adev->srbm_mutex);
mutex_lock(&adev->grbm_idx_mutex);
}
static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
{
mutex_unlock(&adev->grbm_idx_mutex);
mutex_unlock(&adev->srbm_mutex);
}
/**
* get_wave_count: Read device registers to get number of waves in flight for
* a particular queue. The method also returns the VMID associated with the
* queue.
*
* @adev: Handle of device whose registers are to be read
* @queue_idx: Index of queue in the queue-map bit-field
* @wave_cnt: Output parameter updated with number of waves in flight
* @vmid: Output parameter updated with VMID of queue whose wave count
* is being collected
* @inst: xcc's instance number on a multi-XCC setup
*/
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
int *wave_cnt, int *vmid, uint32_t inst)
{
int pipe_idx;
int queue_slot;
unsigned int reg_val;
/*
* Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
* parameters to read out waves in flight. Get VMID if there are
* non-zero waves in flight.
*/
*vmid = 0xFF;
*wave_cnt = 0;
pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
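/* Select ME 1 (compute), the target pipe and queue slot so the per-queue
* wave count and VMID registers can be read.
*/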
soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst);
reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
queue_slot);
*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
if (*wave_cnt != 0)
*vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) &
CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
}
/**
* kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each
* shader engine and aggregates the number of waves that are in flight for the
* process whose pasid is provided as a parameter. The process could have ZERO
* or more queues running and submitting waves to compute units.
*
* @adev: Handle of device from which to get number of waves in flight
* @pasid: Identifies the process for which this query call is invoked
* @pasid_wave_cnt: Output parameter updated with number of waves in flight that
* belong to process with given pasid
* @max_waves_per_cu: Output parameter updated with maximum number of waves
* possible per Compute Unit
* @inst: xcc's instance number on a multi-XCC setup
*
* Note: It's possible that the device has too many queues (oversubscription)
* in which case a VMID could be remapped to a different PASID. This could lead
* to an inaccurate wave count. Following is a high-level sequence:
* Time T1: vmid = getVmid(); vmid is associated with Pasid P1
* Time T2: pasid = getPasid(vmid); vmid is now associated with Pasid P2
* In the sequence above, the wave count obtained at time T1 will be
* incorrectly dropped from or added to the total wave count.
*
* The registers that provide the waves in flight are:
*
* SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
* queue is slotted, OFF if there is no queue. A process could have ZERO or
* more queues slotted and submitting waves to be run on compute units. Even
* when a queue is slotted it is possible there are zero wave fronts; this
* can happen when the queue is waiting on top-of-pipe events - e.g. a
* waitRegMem command
*
* For each bit that is ON from above:
*
* Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
* number of waves that are in flight for the queue at specified index. The
* index ranges from 0 to 7.
*
* If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
* of the wave(s).
*
* Determine if the VMID from the above step maps to the pasid provided as a
* parameter. If it matches, aggregate the wave count. A VMID that does not
* match the pasid is a normal condition, i.e. a device is expected to support
* multiple queues from multiple processes.
*
* Reading registers referenced above involves programming GRBM appropriately
*/
void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst)
{
int qidx;
int vmid;
int se_idx;
int sh_idx;
int se_cnt;
int sh_cnt;
int wave_cnt;
int queue_map;
int pasid_tmp;
int max_queue_cnt;
int vmid_wave_cnt = 0;
DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
lock_spi_csq_mutexes(adev);
soc15_grbm_select(adev, 1, 0, 0, 0, inst);
/*
* Iterate through the shader engines and arrays of the device
* to get number of waves in flight
*/
bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
KGD_MAX_QUEUES);
max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
sh_cnt = adev->gfx.config.max_sh_per_se;
se_cnt = adev->gfx.config.max_shader_engines;
for (se_idx = 0; se_idx < se_cnt; se_idx++) {
for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst);
queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS);
/*
* Assumption: the queue map encodes the following schema: four
* pipes per micro-engine, with each pipe mapping
* eight queues. This schema holds for GFX9 devices
* and must be verified for newer device families
*/
for (qidx = 0; qidx < max_queue_cnt; qidx++) {
/* Skip queues that are not associated with
* compute functions
*/
if (!test_bit(qidx, cp_queue_bitmap))
continue;
if (!(queue_map & (1 << qidx)))
continue;
/* Get number of waves in flight and aggregate them */
get_wave_count(adev, qidx, &wave_cnt, &vmid,
inst);
if (wave_cnt != 0) {
pasid_tmp =
RREG32(SOC15_REG_OFFSET(OSSSYS, inst,
mmIH_VMID_0_LUT) + vmid);
if (pasid_tmp == pasid)
vmid_wave_cnt += wave_cnt;
}
}
}
}
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst);
soc15_grbm_select(adev, 0, 0, 0, 0, inst);
unlock_spi_csq_mutexes(adev);
/* Update the output parameters and return */
*pasid_wave_cnt = vmid_wave_cnt;
*max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
adev->gfx.cu_info.max_waves_per_simd;
}
void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data)
{
*reg_data = wait_times;
/*
* The CP cannot handle a grace period of 0 and would treat it as an
* infinite grace period, so set it to 1 to prevent this.
*/
if (grace_period == 0)
grace_period = 1;
*reg_data = REG_SET_FIELD(*reg_data,
CP_IQ_WAIT_TIME2,
SCH_WAVE,
grace_period);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
{
kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
/*
* Program TBA registers
*/
WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO,
lower_32_bits(tba_addr >> 8));
WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI,
upper_32_bits(tba_addr >> 8));
/*
* Program TMA registers
*/
WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_LO,
lower_32_bits(tma_addr >> 8));
WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_HI,
upper_32_bits(tma_addr >> 8));
kgd_gfx_v9_unlock_srbm(adev, inst);
}
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
.init_interrupts = kgd_gfx_v9_init_interrupts,
.hqd_load = kgd_gfx_v9_hqd_load,
.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
.hqd_dump = kgd_gfx_v9_hqd_dump,
.hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_gfx_v9_hqd_destroy,
.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.enable_debug_trap = kgd_gfx_v9_enable_debug_trap,
.disable_debug_trap = kgd_gfx_v9_disable_debug_trap,
.validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
.set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
.set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
.set_address_watch = kgd_gfx_v9_set_address_watch,
.clear_address_watch = kgd_gfx_v9_clear_address_watch,
.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "gfxhub_v2_0.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "gc/gc_10_1_0_default.h"
#include "navi10_enum.h"
#include "soc15_common.h"
static const char * const gfxhub_client_ids[] = {
"CB/DB",
"Reserved",
"GE1",
"GE2",
"CPF",
"CPC",
"CPG",
"RLC",
"TCP",
"SQC (inst)",
"SQC (data)",
"SQG",
"Reserved",
"SDMA0",
"SDMA1",
"GCR",
"SDMA2",
"SDMA3",
};
static uint32_t gfxhub_v2_0_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
u32 req = 0;
/* invalidate using legacy mode on vmid */
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vmid);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
return req;
}
static void
gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
uint32_t status)
{
u32 cid = REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, CID);
dev_err(adev->dev,
"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
dev_err(adev->dev, "\t RW: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, RW));
}
static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
{
u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
base &= GCMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
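/* FB_BASE is programmed in 16 MB units; shift it back to a byte address. */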
base <<= 24;
return base;
}
static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
}
static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
hub->ctx_addr_distance * vmid,
lower_32_bits(page_table_base));
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
hub->ctx_addr_distance * vmid,
upper_32_bits(page_table_base));
}
static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
(u32)(adev->gmc.gart_start >> 44));
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
(u32)(adev->gmc.gart_end >> 12));
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
(u32)(adev->gmc.gart_end >> 44));
}
static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
if (!amdgpu_sriov_vf(adev)) {
/* Program the AGP BAR */
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
(u32)(value >> 44));
}
/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
(u32)(adev->dummy_page_addr >> 12));
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
(u32)((u64)adev->dummy_page_addr >> 44));
WREG32_FIELD15(GC, 0, GCVM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}
static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* Setup TLB control */
tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
}
static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
/* These regs are not accessible for VF, PF will program these in SRIOV */
if (amdgpu_sriov_vf(adev))
return;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL,
L2_PDE0_CACHE_TAG_GENERATION_MODE, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL, tmp);
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
tmp = mmGCVM_L2_CNTL3_DEFAULT;
if (adev->gmc.translate_further) {
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
} else {
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
}
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
tmp = mmGCVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL4, tmp);
tmp = mmGCVM_L2_CNTL5_DEFAULT;
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL5, tmp);
}
static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
{
uint32_t tmp;
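/* Enable VM context 0 (the system/GART domain) with a single-level page
* table.
*/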
tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp);
}
static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
{
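/* Program the low address above the high address so the identity aperture
* never matches.
*/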
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
0xFFFFFFFF);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
0x0000000F);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
}
static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
int i;
uint32_t tmp;
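/* Program VM contexts 1..15; context 0 is reserved for the GART/system
* domain.
*/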
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL, i);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
adev->vm_manager.num_level);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!adev->gmc.noretry);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
i * hub->ctx_addr_distance, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
i * hub->ctx_addr_distance,
lower_32_bits(adev->vm_manager.max_pfn - 1));
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
i * hub->ctx_addr_distance,
upper_32_bits(adev->vm_manager.max_pfn - 1));
}
hub->vm_cntx_cntl = tmp;
}
static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
unsigned int i;
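/* Open the full address range on all 18 VM invalidation engines. */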
for (i = 0 ; i < 18; ++i) {
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
i * hub->eng_addr_distance, 0xffffffff);
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
i * hub->eng_addr_distance, 0x1f);
}
}
static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
/* GART Enable. */
gfxhub_v2_0_init_gart_aperture_regs(adev);
gfxhub_v2_0_init_system_aperture_regs(adev);
gfxhub_v2_0_init_tlb_regs(adev);
gfxhub_v2_0_init_cache_regs(adev);
gfxhub_v2_0_enable_system_domain(adev);
gfxhub_v2_0_disable_identity_aperture(adev);
gfxhub_v2_0_setup_vmid_config(adev);
gfxhub_v2_0_program_invalidation(adev);
return 0;
}
static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
u32 tmp;
u32 i;
/* Disable all tables */
for (i = 0; i < 16; i++)
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL,
i * hub->ctx_distance, 0);
/* Setup TLB control */
tmp = RREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */
WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
}
}
/**
* gfxhub_v2_0_set_fault_enable_default - update GART/VM fault handling
*
* @adev: amdgpu_device pointer
* @value: true redirects VM faults to the default page
*/
static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
if (!value) {
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_NO_RETRY_FAULT, 1);
tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_RETRY_FAULT, 1);
}
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
.print_l2_protection_fault_status = gfxhub_v2_0_print_l2_protection_fault_status,
.get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
};
static void gfxhub_v2_0_init(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
hub->ctx0_ptb_addr_lo32 =
SOC15_REG_OFFSET(GC, 0,
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
hub->vm_inv_eng0_sem =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_ACK);
hub->vm_context0_cntl =
SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_CNTL);
hub->vm_l2_pro_fault_status =
SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_STATUS);
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
hub->ctx_distance = mmGCVM_CONTEXT1_CNTL - mmGCVM_CONTEXT0_CNTL;
hub->ctx_addr_distance = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
hub->eng_distance = mmGCVM_INVALIDATE_ENG1_REQ -
mmGCVM_INVALIDATE_ENG0_REQ;
hub->eng_addr_distance = mmGCVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
mmGCVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
hub->vm_cntx_cntl_vm_fault = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
}
const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = {
.get_fb_location = gfxhub_v2_0_get_fb_location,
.get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset,
.setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs,
.gart_enable = gfxhub_v2_0_gart_enable,
.gart_disable = gfxhub_v2_0_gart_disable,
.set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default,
.init = gfxhub_v2_0_init,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v1_0.h"
#include "jpeg_v1_0.h"
#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
{
struct amdgpu_device *adev = ring->adev;
ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
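/* Registers in these offset ranges fit in the packet's register field;
* anything else is written indirectly through UVD_JRBC_EXTERNAL_REG_BASE.
*/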
if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
ring->ring[(*ptr)++] = 0;
ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
} else {
ring->ring[(*ptr)++] = reg_offset;
ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
}
ring->ring[(*ptr)++] = val;
}
static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
{
struct amdgpu_device *adev = ring->adev;
uint32_t reg, reg_offset, val, mask, i;
// 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
reg_offset = (reg << 2);
val = lower_32_bits(ring->gpu_addr);
jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
// 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
reg_offset = (reg << 2);
val = upper_32_bits(ring->gpu_addr);
jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
// 3rd to 5th: issue MEM_READ commands
for (i = 0; i <= 2; i++) {
ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
ring->ring[ptr++] = 0;
}
// 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
reg_offset = (reg << 2);
val = 0x13;
jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
// 7th: program mmUVD_JRBC_RB_REF_DATA
reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA);
reg_offset = (reg << 2);
val = 0x1;
jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
// 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
reg_offset = (reg << 2);
val = 0x1;
mask = 0x1;
ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
ring->ring[ptr++] = 0x01400200;
ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
ring->ring[ptr++] = val;
ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
ring->ring[ptr++] = 0;
ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
} else {
ring->ring[ptr++] = reg_offset;
ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
}
ring->ring[ptr++] = mask;
//9th to 21st: insert no-op
for (i = 0; i <= 12; i++) {
ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
ring->ring[ptr++] = 0;
}
//22nd: reset mmUVD_JRBC_RB_RPTR
reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_RPTR);
reg_offset = (reg << 2);
val = 0;
jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
//23rd: program mmUVD_JRBC_RB_CNTL to disable no_fetch
reg = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_CNTL);
reg_offset = (reg << 2);
val = 0x12;
jpeg_v1_0_decode_ring_patch_wreg(ring, &ptr, reg_offset, val);
}
/**
* jpeg_v1_0_decode_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t jpeg_v1_0_decode_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
}
/**
* jpeg_v1_0_decode_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t jpeg_v1_0_decode_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
}
/**
* jpeg_v1_0_decode_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void jpeg_v1_0_decode_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
/**
* jpeg_v1_0_decode_ring_insert_start - insert a start command
*
* @ring: amdgpu_ring pointer
*
* Write a start command to the ring.
*/
static void jpeg_v1_0_decode_ring_insert_start(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x68e04);
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x80010000);
}
/**
* jpeg_v1_0_decode_ring_insert_end - insert an end command
*
* @ring: amdgpu_ring pointer
*
* Write an end command to the ring.
*/
static void jpeg_v1_0_decode_ring_insert_end(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x68e04);
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x00010000);
}
/**
* jpeg_v1_0_decode_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Write a fence and a trap command to the ring.
*/
static void jpeg_v1_0_decode_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
struct amdgpu_device *adev = ring->adev;
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x8);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x01400200);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring,
PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x3fbc);
amdgpu_ring_write(ring,
PACKETJ(0, 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x1);
/* emit trap */
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
amdgpu_ring_write(ring, 0);
}
/**
* jpeg_v1_0_decode_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job to retrieve vmid from
* @ib: indirect buffer to execute
* @flags: unused
*
* Write ring commands to execute the indirect buffer.
*/
static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
amdgpu_ring_write(ring,
PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x01400200);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x2);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
amdgpu_ring_write(ring, 0x2);
}
static void jpeg_v1_0_decode_ring_emit_reg_wait(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val,
uint32_t mask)
{
struct amdgpu_device *adev = ring->adev;
uint32_t reg_offset = (reg << 2);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x01400200);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
} else {
amdgpu_ring_write(ring, reg_offset);
amdgpu_ring_write(ring,
PACKETJ(0, 0, 0, PACKETJ_TYPE3));
}
amdgpu_ring_write(ring, mask);
}
static void jpeg_v1_0_decode_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for register write */
data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
jpeg_v1_0_decode_ring_emit_reg_wait(ring, data0, data1, mask);
}
static void jpeg_v1_0_decode_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
struct amdgpu_device *adev = ring->adev;
uint32_t reg_offset = (reg << 2);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
} else {
amdgpu_ring_write(ring, reg_offset);
amdgpu_ring_write(ring,
PACKETJ(0, 0, 0, PACKETJ_TYPE0));
}
amdgpu_ring_write(ring, val);
}
static void jpeg_v1_0_decode_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
amdgpu_ring_write(ring, 0);
}
}
static int jpeg_v1_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v1_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: JPEG decode TRAP\n");
switch (entry->src_id) {
case 126:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
/**
* jpeg_v1_0_early_init - set function pointers
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
*/
int jpeg_v1_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
jpeg_v1_0_set_dec_ring_funcs(adev);
jpeg_v1_0_set_irq_funcs(adev);
return 0;
}
/**
* jpeg_v1_0_sw_init - sw init for JPEG block
*
* @handle: amdgpu_device pointer
*
*/
int jpeg_v1_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int r;
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->jpeg.inst->irq);
if (r)
return r;
ring = adev->jpeg.inst->ring_dec;
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
adev->jpeg.internal.jpeg_pitch[0] = adev->jpeg.inst->external.jpeg_pitch[0] =
SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
return 0;
}
/**
* jpeg_v1_0_sw_fini - sw fini for JPEG block
*
* @handle: amdgpu_device pointer
*
* Free up the sw allocation made for the JPEG block
*/
void jpeg_v1_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_ring_fini(adev->jpeg.inst->ring_dec);
}
/**
* jpeg_v1_0_start - start JPEG block
*
* @adev: amdgpu_device pointer
* @mode: SPG or DPG mode
*
* Setup and start the JPEG block
*/
void jpeg_v1_0_start(struct amdgpu_device *adev, int mode)
{
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
if (mode == 0) {
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
}
/* initialize wptr */
ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
/* copy patch commands to the jpeg ring */
jpeg_v1_0_decode_ring_set_patch_ring(ring,
(ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
}
static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
.extra_dw = 64,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
.emit_frame_size =
6 + 6 + /* hdp invalidate / flush */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* jpeg_v1_0_decode_ring_emit_vm_flush */
26 + 26 + /* jpeg_v1_0_decode_ring_emit_fence x2 vm fence */
6,
.emit_ib_size = 22, /* jpeg_v1_0_decode_ring_emit_ib */
.emit_ib = jpeg_v1_0_decode_ring_emit_ib,
.emit_fence = jpeg_v1_0_decode_ring_emit_fence,
.emit_vm_flush = jpeg_v1_0_decode_ring_emit_vm_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v1_0_decode_ring_nop,
.insert_start = jpeg_v1_0_decode_ring_insert_start,
.insert_end = jpeg_v1_0_decode_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = jpeg_v1_0_ring_begin_use,
.end_use = vcn_v1_0_ring_end_use,
.emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
.emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v1_0_decode_ring_vm_funcs;
DRM_INFO("JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v1_0_irq_funcs = {
.set = jpeg_v1_0_set_interrupt_state,
.process = jpeg_v1_0_process_interrupt,
};
static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
}
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
int cnt = 0;
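/* VCN 1.0 and JPEG 1.0 share hardware, so wait for the VCN decode and
* encode rings to drain before touching the JPEG ring.
*/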
mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
}
vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "iceland_sdma_pkt_open.h"
#include "ivsrcid/ivsrcid_vislands30.h"
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
SDMA0_REGISTER_OFFSET,
SDMA1_REGISTER_OFFSET
};
static const u32 golden_settings_iceland_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
};
static const u32 iceland_mgcg_cgcg_init[] =
{
mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
* DMA engines. These engines are used for compute
* and gfx. There are two DMA engines (SDMA0, SDMA1)
* and each one supports 1 ring buffer used for gfx
* and 2 queues used for compute.
*
* The programming model is very similar to the CP
* (ring buffer, IBs, etc.), but sDMA has its own
* packet format that is different from the PM4 format
* used by the CP. sDMA supports copying data, writing
* embedded data, solid fills, and a number of other
* things. It also has support for tiling/detiling of
* buffers.
*/
static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
amdgpu_device_program_register_sequence(adev,
iceland_mgcg_cgcg_init,
ARRAY_SIZE(iceland_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_iceland_a11,
ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
}
}
static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
int err = 0, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_TOPAZ:
chip_name = "topaz";
break;
default: BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
info->fw = adev->sdma.instance[i].fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
}
out:
if (err) {
pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
/**
* sdma_v2_4_ring_get_rptr - get the current read pointer
*
* @ring: amdgpu ring pointer
*
* Get the current rptr from the hardware (VI+).
*/
static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
return *ring->rptr_cpu_addr >> 2;
}
/**
* sdma_v2_4_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware (VI+).
*/
static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
return wptr;
}
/**
* sdma_v2_4_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware (VI+).
*/
static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
}
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->funcs->nop);
}
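/*
 * Example (illustrative): with burst_nop firmware,
 * sdma_v2_4_ring_insert_nop(ring, 4) emits one NOP header with COUNT = 3
 * followed by three plain NOP dwords, so the engine consumes all four
 * dwords as a single packet.
 */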
/**
* sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
* @job: job to retrieve vmid from
* @ib: IB object to schedule
* @flags: unused
*
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packet must end on an 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
}
/**
* sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
/**
* sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed (VI).
*/
static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
/* optionally write high bits as well */
if (write64bit) {
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
* sdma_v2_4_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the gfx async dma ring buffers (VI).
*/
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
}
/**
* sdma_v2_4_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the compute async dma queues (VI).
*/
static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev)
{
/* XXX todo */
}
/**
* sdma_v2_4_enable - halt or unhalt the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
*
* Halt or unhalt the async dma engines (VI).
*/
static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
if (!enable) {
sdma_v2_4_gfx_stop(adev);
sdma_v2_4_rlc_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
if (enable)
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
else
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
}
}
/**
* sdma_v2_4_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the gfx DMA ring buffers and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
/* SDMA GFX */
WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
sdma_v2_4_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
}
/**
* sdma_v2_4_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the compute DMA queues and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev)
{
/* XXX todo */
return 0;
}
/**
* sdma_v2_4_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the DMA engines and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_start(struct amdgpu_device *adev)
{
int r;
/* halt the engine before programming */
sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
if (r)
return r;
r = sdma_v2_4_rlc_resume(adev);
if (r)
return r;
return 0;
}
/**
* sdma_v2_4_ring_test_ring - simple async dma engine test
*
* @ring: amdgpu_ring structure holding ring information
*
* Test the DMA engine by using it to write a
* value to memory (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
unsigned i;
unsigned index;
int r;
u32 tmp;
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
if (r)
goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
error_free_wb:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v2_4_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
long r;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
ib.ptr[4] = 0xDEADBEEF;
ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr to copy from
* @count: number of page entries to update
*
* Update PTEs by copying them from the GART using sDMA (CIK).
*/
static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
unsigned bytes = count * 8;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = bytes;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
* sdma_v2_4_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
*
* Update PTEs by writing them manually using sDMA (CIK).
*/
static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr)
{
unsigned ndw = count * 2;
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
}
}
/**
* sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using sDMA (CIK).
*/
static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
* sdma_v2_4_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*
*/
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
else
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
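/*
 * Example (illustrative): an IB of 13 dwords gets (-13) & 7 = 3 NOP dwords
 * of padding, bringing length_dw to 16, the next multiple of 8 required by
 * the engine.
 */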
/**
* sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (CIK).
*/
static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
/**
* sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
* @vmid: vmid number to use
* @pd_addr: address
*
* Update the page table base and flush the VM TLB
* using sDMA (VI).
*/
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); /* reference */
amdgpu_ring_write(ring, 0); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, val);
}
static int sdma_v2_4_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
sdma_v2_4_set_ring_funcs(adev);
sdma_v2_4_set_buffer_funcs(adev);
sdma_v2_4_set_vm_pte_funcs(adev);
sdma_v2_4_set_irq_funcs(adev);
return 0;
}
static int sdma_v2_4_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
r = sdma_v2_4_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
(i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
return r;
}
static int sdma_v2_4_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
sdma_v2_4_free_microcode(adev);
return 0;
}
static int sdma_v2_4_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v2_4_init_golden_registers(adev);
r = sdma_v2_4_start(adev);
if (r)
return r;
return r;
}
static int sdma_v2_4_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v2_4_enable(adev, false);
return 0;
}
static int sdma_v2_4_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v2_4_hw_fini(adev);
}
static int sdma_v2_4_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v2_4_hw_init(adev);
}
static bool sdma_v2_4_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK))
return false;
return true;
}
static int sdma_v2_4_wait_for_idle(void *handle)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK);
if (!tmp)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int sdma_v2_4_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
/* sdma0 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
}
if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
/* sdma1 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
}
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
default:
break;
}
return 0;
}
static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id, queue_id;
instance_id = (entry->ring_id & 0x3) >> 0;
queue_id = (entry->ring_id & 0xc) >> 2;
DRM_DEBUG("IH: SDMA trap\n");
switch (instance_id) {
case 0:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[0].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
case 1:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[1].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
}
return 0;
}
static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id, queue_id;
DRM_ERROR("Illegal instruction in SDMA command stream\n");
instance_id = (entry->ring_id & 0x3) >> 0;
queue_id = (entry->ring_id & 0xc) >> 2;
if (instance_id <= 1 && queue_id == 0)
drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
static int sdma_v2_4_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
/* XXX handled via the smc on VI */
return 0;
}
static int sdma_v2_4_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
.sw_init = sdma_v2_4_sw_init,
.sw_fini = sdma_v2_4_sw_fini,
.hw_init = sdma_v2_4_hw_init,
.hw_fini = sdma_v2_4_hw_fini,
.suspend = sdma_v2_4_suspend,
.resume = sdma_v2_4_resume,
.is_idle = sdma_v2_4_is_idle,
.wait_for_idle = sdma_v2_4_wait_for_idle,
.soft_reset = sdma_v2_4_soft_reset,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
};
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = false,
.secure_submission_supported = true,
.get_rptr = sdma_v2_4_ring_get_rptr,
.get_wptr = sdma_v2_4_ring_get_wptr,
.set_wptr = sdma_v2_4_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v2_4_ring_emit_hdp_flush */
3 + /* hdp invalidate */
6 + /* sdma_v2_4_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
.emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
.pad_ib = sdma_v2_4_ring_pad_ib,
.emit_wreg = sdma_v2_4_ring_emit_wreg,
};
static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
adev->sdma.instance[i].ring.me = i;
}
}
static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
.set = sdma_v2_4_set_trap_irq_state,
.process = sdma_v2_4_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
.process = sdma_v2_4_process_illegal_inst_irq,
};
static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
}
/**
* sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
* @tmz: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = byte_count;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
* sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
* Fill GPU buffers using the DMA engine (VI).
*/
static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
.copy_max_bytes = 0x1fffff,
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v2_4_emit_copy_buffer,
.fill_max_bytes = 0x1fffff,
.fill_num_dw = 7,
.emit_fill_buffer = sdma_v2_4_emit_fill_buffer,
};
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = sdma_v2_4_vm_copy_pte,
.write_pte = sdma_v2_4_vm_write_pte,
.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};
static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->vm_manager.vm_pte_scheds[i] =
&adev->sdma.instance[i].ring.sched;
}
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 2,
.minor = 4,
.rev = 0,
.funcs = &sdma_v2_4_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2014-2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"
/* Userptr restore delay, just long enough to allow consecutive VM
* changes to accumulate
*/
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
/*
* Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
#define VRAM_AVAILABLITY_ALIGN (1 << 21)
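/*
 * Example (illustrative): with this 2 MiB granularity, a 4 KiB VRAM
 * allocation is accounted as 2 MiB in vram_used_aligned (see
 * amdgpu_amdkfd_reserve_mem_limit() below), while vram_used tracks the
 * exact size.
 */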
/* Impose limit on how much memory KFD can use */
static struct {
uint64_t max_system_mem_limit;
uint64_t max_ttm_mem_limit;
int64_t system_mem_used;
int64_t ttm_mem_used;
spinlock_t mem_limit_lock;
} kfd_mem_limit;
static const char * const domain_bit_to_string[] = {
"CPU",
"GTT",
"VRAM",
"GDS",
"GWS",
"OA"
};
#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
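/*
 * Example (illustrative, assuming the usual AMDGPU_GEM_DOMAIN_* bit layout
 * with CPU = 0x1, GTT = 0x2, VRAM = 0x4): domain_string(AMDGPU_GEM_DOMAIN_VRAM)
 * evaluates ffs(0x4) - 1 = 2 and returns "VRAM".
 */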
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
struct kgd_mem *mem)
{
struct kfd_mem_attachment *entry;
list_for_each_entry(entry, &mem->attachments, list)
if (entry->bo_va->base.vm == avm)
return true;
return false;
}
/**
* reuse_dmamap() - Check whether adev can share the original
* userptr BO
*
* If both adev and bo_adev are in direct mapping or
* in the same iommu group, they can share the original BO.
*
* @adev: Device which can or cannot share the original BO
* @bo_adev: Device to which allocated BO belongs to
*
* Return: returns true if adev can share original userptr BO,
* false otherwise.
*/
static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
{
return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
(adev->dev->iommu_group == bo_adev->dev->iommu_group);
}
/* Set memory usage limits. Currently, the limits are
* System (TTM + userptr) memory - 15/16th System RAM
* TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
struct sysinfo si;
uint64_t mem;
if (kfd_mem_limit.max_system_mem_limit)
return;
si_meminfo(&si);
mem = si.freeram - si.freehigh;
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
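/*
 * Worked example (illustrative): with 32 GiB of usable low memory, mem >> 4
 * is 2 GiB, so max_system_mem_limit becomes 30 GiB (15/16 of RAM), while
 * max_ttm_mem_limit simply follows whatever ttm_tt_pages_limit() reports.
 */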
void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
{
kfd_mem_limit.system_mem_used += size;
}
/* Estimate page table size needed to represent a given memory size
*
* With 4KB pages, we need one 8 byte PTE for each 4KB of memory
* (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
* of memory (factor 256K, >> 18). ROCm user mode tries to optimize
* for 2MB pages for TLB efficiency. However, small allocations and
* fragmented system memory still need some 4KB pages. We choose a
* compromise that should work in most cases without reserving too
* much memory for page tables unnecessarily (factor 16K, >> 14).
*/
#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
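/*
 * Worked example (illustrative): for 64 GiB of managed memory,
 * ESTIMATE_PT_SIZE yields 64 GiB >> 14 = 4 MiB of page table reservation,
 * unless that is below AMDGPU_VM_RESERVED_VRAM, in which case the reserved
 * minimum applies.
 */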
/**
* amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
* of buffer.
*
* @adev: Device to which allocated BO belongs to
* @size: Size of buffer, in bytes, encapsulated by BO. This should be
* equivalent to amdgpu_bo_size(BO)
* @alloc_flag: Flag used in allocating a BO as noted above
* @xcp_id: xcp_id is used to get xcp from xcp manager, one xcp is
* managed as one compute node in driver for app
*
* Return:
* returns -ENOMEM in case of error, ZERO otherwise
*/
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t system_mem_needed, ttm_mem_needed, vram_needed;
int ret = 0;
uint64_t vram_size = 0;
system_mem_needed = 0;
ttm_mem_needed = 0;
vram_needed = 0;
if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
system_mem_needed = size;
ttm_mem_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
/*
* Conservatively round up the allocation requirement to 2 MB
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
vram_needed = size;
/*
* For GFX 9.4.3, get the VRAM size from XCP structs
*/
if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
return -EINVAL;
vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
if (adev->gmc.is_app_apu) {
system_mem_needed = size;
ttm_mem_needed = size;
}
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
return -ENOMEM;
}
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (kfd_mem_limit.system_mem_used + system_mem_needed >
kfd_mem_limit.max_system_mem_limit)
pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
if ((kfd_mem_limit.system_mem_used + system_mem_needed >
kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
(kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
kfd_mem_limit.max_ttm_mem_limit) ||
(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
vram_size - reserved_for_pt)) {
ret = -ENOMEM;
goto release;
}
/* Update memory accounting by decreasing available system
* memory, TTM memory and GPU memory as computed above
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
if (adev && xcp_id >= 0) {
adev->kfd.vram_used[xcp_id] += vram_needed;
adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ?
vram_needed :
ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
release:
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 alloc_flag, int8_t xcp_id)
{
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
goto release;
if (adev) {
adev->kfd.vram_used[xcp_id] -= size;
if (adev->gmc.is_app_apu) {
adev->kfd.vram_used_aligned[xcp_id] -= size;
kfd_mem_limit.system_mem_used -= size;
kfd_mem_limit.ttm_mem_used -= size;
} else {
adev->kfd.vram_used_aligned[xcp_id] -=
ALIGN(size, VRAM_AVAILABLITY_ALIGN);
}
}
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
goto release;
}
WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
"KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
"KFD TTM memory accounting unbalanced");
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
"KFD system memory accounting unbalanced");
release:
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
u32 alloc_flags = bo->kfd_bo->alloc_flags;
u64 size = amdgpu_bo_size(bo);
amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
bo->xcp_id);
kfree(bo->kfd_bo);
}
/**
* create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
* about a USERPTR, DOORBELL or MMIO BO.
*
* @adev: Device for which dmamap BO is being created
* @mem: BO of peer device that is being DMA mapped. Provides parameters
* in building the dmamap BO
* @bo_out: Output parameter updated with handle of dmamap BO
*/
static int
create_dmamap_sg_bo(struct amdgpu_device *adev,
struct kgd_mem *mem, struct amdgpu_bo **bo_out)
{
struct drm_gem_object *gem_obj;
int ret;
uint64_t flags = 0;
ret = amdgpu_bo_reserve(mem->bo, false);
if (ret)
return ret;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
AMDGPU_GEM_CREATE_UNCACHED);
ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
amdgpu_bo_unreserve(mem->bo);
if (ret) {
pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
return -EINVAL;
}
*bo_out = gem_to_amdgpu_bo(gem_obj);
(*bo_out)->parent = amdgpu_bo_ref(mem->bo);
return ret;
}
/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
* reservation object.
*
* @bo: [IN] Remove eviction fence(s) from this BO
* @ef: [IN] This eviction fence is removed if it
* is present in the shared list.
*
* NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
*/
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence *ef)
{
struct dma_fence *replacement;
if (!ef)
return -EINVAL;
/* TODO: Instead of block before we should use the fence of the page
* table update and TLB flush here directly.
*/
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
replacement, DMA_RESV_USAGE_BOOKKEEP);
dma_fence_put(replacement);
return 0;
}
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
struct amdgpu_bo *root = bo;
struct amdgpu_vm_bo_base *vm_bo;
struct amdgpu_vm *vm;
struct amdkfd_process_info *info;
struct amdgpu_amdkfd_fence *ef;
int ret;
/* we can always get vm_bo from root PD bo.*/
while (root->parent)
root = root->parent;
vm_bo = root->vm_bo;
if (!vm_bo)
return 0;
vm = vm_bo->vm;
if (!vm)
return 0;
info = vm->process_info;
if (!info || !info->eviction_fence)
return 0;
ef = container_of(dma_fence_get(&info->eviction_fence->base),
struct amdgpu_amdkfd_fence, base);
BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
dma_resv_unlock(bo->tbo.base.resv);
dma_fence_put(&ef->base);
return ret;
}
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
struct ttm_operation_ctx ctx = { false, false };
int ret;
if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
"Called with userptr BO"))
return -EINVAL;
amdgpu_bo_placement_from_domain(bo, domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
goto validate_fail;
if (wait)
amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
validate_fail:
return ret;
}
static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
*
* Page directories are not updated here because huge page handling
* during page table updates can invalidate page directory entries
* again. Page directories are only updated after updating page
* tables.
*/
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
struct amdgpu_bo *pd = vm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("failed to validate PT BOs\n");
return ret;
}
vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
return 0;
}
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo *pd = vm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
int ret;
ret = amdgpu_vm_update_pdes(adev, vm, false);
if (ret)
return ret;
return amdgpu_sync_fence(sync, vm->last_update);
}
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
/**
* create_sg_table() - Create an sg_table for a contiguous DMA addr range
* @addr: The starting address to point to
* @size: Size of memory area in bytes being pointed to
*
* Allocates an instance of sg_table and initializes it to point to memory
* area specified by the input parameters. The address used to build it is
* assumed to be DMA mapped, if needed.
*
* DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
* because they are physically contiguous.
*
* Return: Initialized instance of SG Table or NULL
*/
static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
{
struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
return NULL;
if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
kfree(sg);
return NULL;
}
sg_dma_address(sg->sgl) = addr;
sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->sgl->dma_length = size;
#endif
return sg;
}
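/*
 * Usage sketch (illustrative): kfd_mem_dmamap_sg_bo() below calls
 * create_sg_table(dma_addr, length) with the already DMA-mapped DOORBELL or
 * MMIO address and attaches the resulting table to the accessing device's
 * ttm_tt.
 */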
static int
kfd_mem_dmamap_userptr(struct kgd_mem *mem,
struct kfd_mem_attachment *attachment)
{
enum dma_data_direction direction =
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
struct amdgpu_device *adev = attachment->adev;
struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
struct ttm_tt *ttm = bo->tbo.ttm;
int ret;
if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
return -EINVAL;
ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
if (unlikely(!ttm->sg))
return -ENOMEM;
/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
ttm->num_pages, 0,
(u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (unlikely(ret))
goto free_sg;
ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
if (unlikely(ret))
goto release_sg;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
goto unmap_sg;
return 0;
unmap_sg:
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
release_sg:
pr_err("DMA map userptr failed: %d\n", ret);
sg_free_table(ttm->sg);
free_sg:
kfree(ttm->sg);
ttm->sg = NULL;
return ret;
}
static int
kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
int ret;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
/**
* kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
* @mem: SG BO of the DOORBELL or MMIO resource on the owning device
* @attachment: Virtual address attachment of the BO on accessing device
*
* An access request from the device that owns DOORBELL does not require DMA mapping.
* This is because the request doesn't go through the PCIe root complex; it instead
* loops back. The need to DMA map arises only when accessing a peer device's DOORBELL.
*
* In contrast, all access requests for MMIO need to be DMA mapped without regard to
* device ownership. This is because access requests for MMIO go through PCIe root
* complex.
*
* This is accomplished in two steps:
* - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
* in updating requesting device's page table
* - Signal TTM to mark memory pointed to by requesting device's BO as GPU
* accessible. This allows an update of requesting device's page table
* with entries associated with DOORBELL or MMIO memory
*
* This method is invoked in the following contexts:
* - Mapping of DOORBELL or MMIO BO of same or peer device
* - Validating an evicted DOORBELL or MMIO BO on a device seeking access
*
* Return: ZERO if successful, NON-ZERO otherwise
*/
static int
kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
struct amdgpu_device *adev = attachment->adev;
struct ttm_tt *ttm = bo->tbo.ttm;
enum dma_data_direction dir;
dma_addr_t dma_addr;
bool mmio;
int ret;
/* Expect SG Table of dmamap BO to be NULL */
mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
if (unlikely(ttm->sg)) {
pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
return -EINVAL;
}
dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
dma_addr = mem->bo->tbo.sg->sgl->dma_address;
pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
dma_addr = dma_map_resource(adev->dev, dma_addr,
mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
ret = dma_mapping_error(adev->dev, dma_addr);
if (unlikely(ret))
return ret;
pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);
ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
if (unlikely(!ttm->sg)) {
ret = -ENOMEM;
goto unmap_sg;
}
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(ret))
goto free_sg;
return ret;
free_sg:
sg_free_table(ttm->sg);
kfree(ttm->sg);
ttm->sg = NULL;
unmap_sg:
dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
dir, DMA_ATTR_SKIP_CPU_SYNC);
return ret;
}
static int
kfd_mem_dmamap_attachment(struct kgd_mem *mem,
struct kfd_mem_attachment *attachment)
{
switch (attachment->type) {
case KFD_MEM_ATT_SHARED:
return 0;
case KFD_MEM_ATT_USERPTR:
return kfd_mem_dmamap_userptr(mem, attachment);
case KFD_MEM_ATT_DMABUF:
return kfd_mem_dmamap_dmabuf(attachment);
case KFD_MEM_ATT_SG:
return kfd_mem_dmamap_sg_bo(mem, attachment);
default:
WARN_ON_ONCE(1);
}
return -EINVAL;
}
static void
kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
struct kfd_mem_attachment *attachment)
{
enum dma_data_direction direction =
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
struct ttm_operation_ctx ctx = {.interruptible = false};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
struct amdgpu_device *adev = attachment->adev;
struct ttm_tt *ttm = bo->tbo.ttm;
if (unlikely(!ttm->sg))
return;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
kfree(ttm->sg);
ttm->sg = NULL;
}
static void
kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
{
/* This is a no-op. We don't want to trigger eviction fences when
* unmapping DMABufs. Therefore the invalidation (moving to system
* domain) is done in kfd_mem_dmamap_dmabuf.
*/
}
/**
* kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
* @mem: SG BO of the DOORBELL or MMIO resource on the owning device
* @attachment: Virtual address attachment of the BO on accessing device
*
* The method performs following steps:
* - Signal TTM to mark memory pointed to by BO as GPU inaccessible
* - Free SG Table that is used to encapsulate DMA mapped memory of
* peer device's DOORBELL or MMIO memory
*
* This method is invoked in the following contexts:
* Unmapping of DOORBELL or MMIO BO on a device having access to its memory
* Eviction of DOORBELL or MMIO BO on a device having access to its memory
*
* Return: void
*/
static void
kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
struct kfd_mem_attachment *attachment)
{
struct ttm_operation_ctx ctx = {.interruptible = true};
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
struct amdgpu_device *adev = attachment->adev;
struct ttm_tt *ttm = bo->tbo.ttm;
enum dma_data_direction dir;
if (unlikely(!ttm->sg)) {
pr_err("SG Table of BO is UNEXPECTEDLY NULL");
return;
}
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(ttm->sg);
kfree(ttm->sg);
ttm->sg = NULL;
bo->tbo.sg = NULL;
}
static void
kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
struct kfd_mem_attachment *attachment)
{
switch (attachment->type) {
case KFD_MEM_ATT_SHARED:
break;
case KFD_MEM_ATT_USERPTR:
kfd_mem_dmaunmap_userptr(mem, attachment);
break;
case KFD_MEM_ATT_DMABUF:
kfd_mem_dmaunmap_dmabuf(attachment);
break;
case KFD_MEM_ATT_SG:
kfd_mem_dmaunmap_sg_bo(mem, attachment);
break;
default:
WARN_ON_ONCE(1);
}
}
static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
{
if (!mem->dmabuf) {
struct dma_buf *ret = amdgpu_gem_prime_export(
&mem->bo->tbo.base,
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
DRM_RDWR : 0);
if (IS_ERR(ret))
return PTR_ERR(ret);
mem->dmabuf = ret;
}
return 0;
}
static int
kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_bo **bo)
{
struct drm_gem_object *gobj;
int ret;
ret = kfd_mem_export_dmabuf(mem);
if (ret)
return ret;
gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
if (IS_ERR(gobj))
return PTR_ERR(gobj);
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
return 0;
}
/* kfd_mem_attach - Add a BO to a VM
*
* Everything that needs to be done only once when a BO is first added
* to a VM. It can later be mapped and unmapped many times without
* repeating these steps.
*
* 0. Create BO for DMA mapping, if needed
* 1. Allocate and initialize BO VA entry data structure
* 2. Add BO to the VM
* 3. Determine ASIC-specific PTE flags
* 4. Alloc page tables and directories if needed
* 4a. Validate new page tables and directories
*/
static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
struct amdgpu_vm *vm, bool is_aql)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
unsigned long bo_size = mem->bo->tbo.base.size;
uint64_t va = mem->va;
struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
struct amdgpu_bo *bo[2] = {NULL, NULL};
bool same_hive = false;
int i, ret;
if (!va) {
pr_err("Invalid VA when adding BO to VM\n");
return -EINVAL;
}
/* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
*
* The access path of MMIO and DOORBELL BOs is always over PCIe.
* In contrast, the access path of VRAM BOs depends upon the type of
* link that connects the peer device. Access over PCIe is allowed
* if the peer device has a large BAR. In contrast, access over xGMI is
* allowed for both small and large BAR configurations of the peer device.
*/
if ((adev != bo_adev && !adev->gmc.is_app_apu) &&
((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
return -EINVAL;
}
for (i = 0; i <= is_aql; i++) {
attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
if (unlikely(!attachment[i])) {
ret = -ENOMEM;
goto unwind;
}
pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
va + bo_size, vm);
if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
(amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
same_hive) {
/* Mappings on the local GPU, or VRAM mappings in the
* local hive, or userptr mappings that can reuse the DMA map
* address space, share the original BO
*/
attachment[i]->type = KFD_MEM_ATT_SHARED;
bo[i] = mem->bo;
drm_gem_object_get(&bo[i]->tbo.base);
} else if (i > 0) {
/* Multiple mappings on the same GPU share the BO */
attachment[i]->type = KFD_MEM_ATT_SHARED;
bo[i] = bo[0];
drm_gem_object_get(&bo[i]->tbo.base);
} else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
/* Create an SG BO to DMA-map userptrs on other GPUs */
attachment[i]->type = KFD_MEM_ATT_USERPTR;
ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
if (ret)
goto unwind;
/* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
} else if (mem->bo->tbo.type == ttm_bo_type_sg) {
WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
"Handing invalid SG BO in ATTACH request");
attachment[i]->type = KFD_MEM_ATT_SG;
ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
if (ret)
goto unwind;
/* Enable access to GTT and VRAM BOs of peer devices */
} else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
attachment[i]->type = KFD_MEM_ATT_DMABUF;
ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
if (ret)
goto unwind;
pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
} else {
WARN_ONCE(true, "Handling invalid ATTACH request");
ret = -EINVAL;
goto unwind;
}
/* Add BO to VM internal data structures */
ret = amdgpu_bo_reserve(bo[i], false);
if (ret) {
pr_debug("Unable to reserve BO during memory attach");
goto unwind;
}
attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
amdgpu_bo_unreserve(bo[i]);
if (unlikely(!attachment[i]->bo_va)) {
ret = -ENOMEM;
pr_err("Failed to add BO object to VM. ret == %d\n",
ret);
goto unwind;
}
attachment[i]->va = va;
attachment[i]->pte_flags = get_pte_flags(adev, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
va += bo_size;
}
return 0;
unwind:
for (; i >= 0; i--) {
if (!attachment[i])
continue;
if (attachment[i]->bo_va) {
amdgpu_bo_reserve(bo[i], true);
amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
amdgpu_bo_unreserve(bo[i]);
list_del(&attachment[i]->list);
}
if (bo[i])
drm_gem_object_put(&bo[i]->tbo.base);
kfree(attachment[i]);
}
return ret;
}
static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
{
struct amdgpu_bo *bo = attachment->bo_va->base.bo;
pr_debug("\t remove VA 0x%llx in entry %p\n",
attachment->va, attachment);
amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
drm_gem_object_put(&bo->tbo.base);
list_del(&attachment->list);
kfree(attachment);
}
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdkfd_process_info *process_info,
bool userptr)
{
mutex_lock(&process_info->lock);
if (userptr)
list_add_tail(&mem->validate_list,
&process_info->userptr_valid_list);
else
list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
mutex_unlock(&process_info->lock);
}
static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
struct amdkfd_process_info *process_info)
{
mutex_lock(&process_info->lock);
list_del(&mem->validate_list);
mutex_unlock(&process_info->lock);
}
/* Initializes user pages. It registers the MMU notifier and validates
* the userptr BO in the GTT domain.
*
* The BO must already be on the userptr_valid_list. Otherwise an
* eviction and restore may happen that leaves the new BO unmapped
* with the user mode queues running.
*
* Takes the process_info->lock to protect against concurrent restore
* workers.
*
* Returns 0 for success, negative errno for errors.
*/
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
bool criu_resume)
{
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
struct hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
if (ret) {
pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
goto out;
}
ret = amdgpu_hmm_register(bo, user_addr);
if (ret) {
pr_err("%s: Failed to register MMU notifier: %d\n",
__func__, ret);
goto out;
}
if (criu_resume) {
/*
* During a CRIU restore operation, the userptr buffer objects
* will be validated in the restore_userptr_work worker at a
* later stage when it is scheduled by another ioctl called by
* CRIU master process for the target pid for restore.
*/
mutex_lock(&process_info->notifier_lock);
mem->invalid++;
mutex_unlock(&process_info->notifier_lock);
mutex_unlock(&process_info->lock);
return 0;
}
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out;
}
ret = amdgpu_bo_reserve(bo, true);
if (ret) {
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
pr_err("%s: failed to validate BO\n", __func__);
amdgpu_bo_unreserve(bo);
release_out:
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
unregister_out:
if (ret)
amdgpu_hmm_unregister(bo);
out:
mutex_unlock(&process_info->lock);
return ret;
}
/* Reserving a BO and its page table BOs must happen atomically to
* avoid deadlocks. Some operations update multiple VMs at once. Track
* all the reservation info in a context structure. Optionally a sync
* object can track VM updates.
*/
struct bo_vm_reservation_context {
/* DRM execution context for the reservation */
struct drm_exec exec;
/* Number of VMs reserved */
unsigned int n_vms;
/* Pointer to sync object */
struct amdgpu_sync *sync;
};
enum bo_vm_match {
BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
BO_VM_MAPPED, /* Match VMs where a BO is mapped */
BO_VM_ALL, /* Match all VMs a BO was added to */
};
/**
* reserve_bo_and_vm - reserve a BO and a VM unconditionally.
* @mem: KFD BO structure.
* @vm: the VM to reserve.
* @ctx: the struct that will be used in unreserve_bo_and_vms().
*/
static int reserve_bo_and_vm(struct kgd_mem *mem,
struct amdgpu_vm *vm,
struct bo_vm_reservation_context *ctx)
{
struct amdgpu_bo *bo = mem->bo;
int ret;
WARN_ON(!vm);
ctx->n_vms = 1;
ctx->sync = &mem->sync;
drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&ctx->exec) {
ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
drm_exec_retry_on_contention(&ctx->exec);
if (unlikely(ret))
goto error;
ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
drm_exec_retry_on_contention(&ctx->exec);
if (unlikely(ret))
goto error;
}
return 0;
error:
pr_err("Failed to reserve buffers in ttm.\n");
drm_exec_fini(&ctx->exec);
return ret;
}
/**
* reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
* @mem: KFD BO structure.
* @vm: the VM to reserve. If NULL, all VMs associated with the BO
* are used. Otherwise, only the given VM is used.
* @map_type: the mapping status that will be used to filter the VMs.
* @ctx: the struct that will be used in unreserve_bo_and_vms().
*
* Returns 0 for success, negative for failure.
*/
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
struct amdgpu_vm *vm, enum bo_vm_match map_type,
struct bo_vm_reservation_context *ctx)
{
struct kfd_mem_attachment *entry;
struct amdgpu_bo *bo = mem->bo;
int ret;
ctx->sync = &mem->sync;
drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&ctx->exec) {
ctx->n_vms = 0;
list_for_each_entry(entry, &mem->attachments, list) {
if ((vm && vm != entry->bo_va->base.vm) ||
(entry->is_mapped != map_type
&& map_type != BO_VM_ALL))
continue;
ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
&ctx->exec, 2);
drm_exec_retry_on_contention(&ctx->exec);
if (unlikely(ret))
goto error;
++ctx->n_vms;
}
ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
drm_exec_retry_on_contention(&ctx->exec);
if (unlikely(ret))
goto error;
}
return 0;
error:
pr_err("Failed to reserve buffers in ttm.\n");
drm_exec_fini(&ctx->exec);
return ret;
}
/**
* unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
* @ctx: Reservation context to unreserve
* @wait: Optionally wait for a sync object representing pending VM updates
* @intr: Whether the wait is interruptible
*
* Also frees any resources allocated in
* reserve_bo_and_(cond_)vm(s). Returns the status from
* amdgpu_sync_wait.
*/
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
bool wait, bool intr)
{
int ret = 0;
if (wait)
ret = amdgpu_sync_wait(ctx->sync, intr);
drm_exec_fini(&ctx->exec);
ctx->sync = NULL;
return ret;
}
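/*
 * Illustrative sketch only, not part of the upstream driver: it shows how the
 * map/unmap paths below use the reservation helpers. The helper name
 * kfd_example_reserve_pattern() and the placeholder work inside the critical
 * section are hypothetical.
 */
static inline int kfd_example_reserve_pattern(struct kgd_mem *mem,
					      struct amdgpu_vm *vm)
{
	struct bo_vm_reservation_context ctx;
	int ret;

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (ret)
		return ret;

	/* ... update mappings or page tables while the BO and VM are locked ... */

	/* Wait interruptibly for pending VM updates tracked in ctx.sync */
	return unreserve_bo_and_vms(&ctx, true, true);
}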
static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
struct amdgpu_vm *vm = bo_va->base.vm;
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
amdgpu_sync_fence(sync, bo_va->last_pt_update);
kfd_mem_dmaunmap_attachment(mem, entry);
}
static int update_gpuvm_pte(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
int ret;
ret = kfd_mem_dmamap_attachment(mem, entry);
if (ret)
return ret;
/* Update the page tables */
ret = amdgpu_vm_bo_update(adev, bo_va, false);
if (ret) {
pr_err("amdgpu_vm_bo_update failed\n");
return ret;
}
return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}
static int map_bo_to_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync,
bool no_update_pte)
{
int ret;
/* Set virtual address for the allocation */
ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
amdgpu_bo_size(entry->bo_va->base.bo),
entry->pte_flags);
if (ret) {
pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
entry->va, ret);
return ret;
}
if (no_update_pte)
return 0;
ret = update_gpuvm_pte(mem, entry, sync);
if (ret) {
pr_err("update_gpuvm_pte() failed\n");
goto update_gpuvm_pte_failed;
}
return 0;
update_gpuvm_pte_failed:
unmap_bo_from_gpuvm(mem, entry, sync);
return ret;
}
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
struct amdgpu_vm *peer_vm;
int ret;
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = vm_validate_pt_pd_bos(peer_vm);
if (ret)
return ret;
}
return 0;
}
static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
struct amdgpu_sync *sync)
{
struct amdgpu_vm *peer_vm;
int ret;
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
struct amdgpu_bo *pd = peer_vm->root.bo;
ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
AMDGPU_SYNC_NE_OWNER,
AMDGPU_FENCE_OWNER_KFD);
if (ret)
return ret;
}
return 0;
}
static int process_update_pds(struct amdkfd_process_info *process_info,
struct amdgpu_sync *sync)
{
struct amdgpu_vm *peer_vm;
int ret;
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = vm_update_pds(peer_vm, sync);
if (ret)
return ret;
}
return 0;
}
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
struct dma_fence **ef)
{
struct amdkfd_process_info *info = NULL;
int ret;
if (!*process_info) {
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
mutex_init(&info->lock);
mutex_init(&info->notifier_lock);
INIT_LIST_HEAD(&info->vm_list_head);
INIT_LIST_HEAD(&info->kfd_bo_list);
INIT_LIST_HEAD(&info->userptr_valid_list);
INIT_LIST_HEAD(&info->userptr_inval_list);
info->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
current->mm,
NULL);
if (!info->eviction_fence) {
pr_err("Failed to create eviction fence\n");
ret = -ENOMEM;
goto create_evict_fence_fail;
}
info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
INIT_DELAYED_WORK(&info->restore_userptr_work,
amdgpu_amdkfd_restore_userptr_worker);
*process_info = info;
*ef = dma_fence_get(&info->eviction_fence->base);
}
vm->process_info = *process_info;
/* Validate page directory and attach eviction fence */
ret = amdgpu_bo_reserve(vm->root.bo, true);
if (ret)
goto reserve_pd_fail;
ret = vm_validate_pt_pd_bos(vm);
if (ret) {
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
ret = amdgpu_bo_sync_wait(vm->root.bo,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
dma_resv_add_fence(vm->root.bo->tbo.base.resv,
&vm->process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(vm->root.bo);
/* Update process info */
mutex_lock(&vm->process_info->lock);
list_add_tail(&vm->vm_list_node,
&(vm->process_info->vm_list_head));
vm->process_info->n_vms++;
mutex_unlock(&vm->process_info->lock);
return 0;
reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
amdgpu_bo_unreserve(vm->root.bo);
reserve_pd_fail:
vm->process_info = NULL;
if (info) {
/* Two fence references: one in info and one in *ef */
dma_fence_put(&info->eviction_fence->base);
dma_fence_put(*ef);
*ef = NULL;
*process_info = NULL;
put_pid(info->pid);
create_evict_fence_fail:
mutex_destroy(&info->lock);
mutex_destroy(&info->notifier_lock);
kfree(info);
}
return ret;
}
/**
* amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
* @bo: Handle of buffer object being pinned
* @domain: Domain into which BO should be pinned
*
* - USERPTR BOs are UNPINNABLE and will return error
* - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
* PIN count incremented. It is valid to PIN a BO multiple times
*
* Return: ZERO if successful in pinning, Non-Zero in case of error.
*/
static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
{
int ret = 0;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret))
return ret;
ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
if (ret)
pr_err("Error in Pinning BO to domain: %d\n", domain);
amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
amdgpu_bo_unreserve(bo);
return ret;
}
/**
* amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
* @bo: Handle of buffer object being unpinned
*
* - Is an illegal request for USERPTR BOs and is ignored
* - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
* PIN count decremented. Calls to UNPIN must balance calls to PIN
*/
static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
{
int ret = 0;
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret))
return;
amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
}
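/*
 * Illustrative sketch only, not part of the upstream driver: PIN and UNPIN
 * calls must balance, as noted above. The helper name
 * kfd_example_pin_for_access() and the placeholder access are hypothetical.
 */
static inline int kfd_example_pin_for_access(struct amdgpu_bo *bo)
{
	int ret;

	ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret)
		return ret;

	/* ... access the BO while it cannot be evicted or moved ... */

	amdgpu_amdkfd_gpuvm_unpin_bo(bo);
	return 0;
}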
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
struct amdgpu_vm *avm, u32 pasid)
{
int ret;
/* Free the original amdgpu-allocated pasid;
 * it will be replaced with a kfd-allocated pasid.
 */
if (avm->pasid) {
amdgpu_pasid_free(avm->pasid);
amdgpu_vm_set_pasid(adev, avm, 0);
}
ret = amdgpu_vm_set_pasid(adev, avm, pasid);
if (ret)
return ret;
return 0;
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef)
{
int ret;
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;
/* Convert VM into a compute VM */
ret = amdgpu_vm_make_compute(adev, avm);
if (ret)
return ret;
/* Initialize KFD part of the VM and process info */
ret = init_kfd_vm(avm, process_info, ef);
if (ret)
return ret;
amdgpu_vm_set_task_info(avm);
return 0;
}
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
struct amdkfd_process_info *process_info = vm->process_info;
if (!process_info)
return;
/* Update process info */
mutex_lock(&process_info->lock);
process_info->n_vms--;
list_del(&vm->vm_list_node);
mutex_unlock(&process_info->lock);
vm->process_info = NULL;
/* Release per-process resources when last compute VM is destroyed */
if (!process_info->n_vms) {
WARN_ON(!list_empty(&process_info->kfd_bo_list));
WARN_ON(!list_empty(&process_info->userptr_valid_list));
WARN_ON(!list_empty(&process_info->userptr_inval_list));
dma_fence_put(&process_info->eviction_fence->base);
cancel_delayed_work_sync(&process_info->restore_userptr_work);
put_pid(process_info->pid);
mutex_destroy(&process_info->lock);
mutex_destroy(&process_info->notifier_lock);
kfree(process_info);
}
}
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
void *drm_priv)
{
struct amdgpu_vm *avm;
if (WARN_ON(!adev || !drm_priv))
return;
avm = drm_priv_to_vm(drm_priv);
pr_debug("Releasing process vm %p\n", avm);
/* The original pasid of the amdgpu vm has already been
 * released when the amdgpu vm was converted to a compute vm.
 * The current pasid is managed by kfd and will be
 * released on kfd process destroy. Set the amdgpu pasid
 * to 0 to avoid a duplicate release.
 */
amdgpu_vm_release_compute(adev, avm);
}
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdgpu_bo *pd = avm->root.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
if (adev->asic_type < CHIP_VEGA10)
return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
return avm->pd_phys_addr;
}
void amdgpu_amdkfd_block_mmu_notifications(void *p)
{
struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
mutex_lock(&pinfo->lock);
WRITE_ONCE(pinfo->block_mmu_notifications, true);
mutex_unlock(&pinfo->lock);
}
int amdgpu_amdkfd_criu_resume(void *p)
{
int ret = 0;
struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
mutex_lock(&pinfo->lock);
pr_debug("scheduling work\n");
mutex_lock(&pinfo->notifier_lock);
pinfo->evicted_bos++;
mutex_unlock(&pinfo->notifier_lock);
if (!READ_ONCE(pinfo->block_mmu_notifications)) {
ret = -EINVAL;
goto out_unlock;
}
WRITE_ONCE(pinfo->block_mmu_notifications, false);
schedule_delayed_work(&pinfo->restore_userptr_work, 0);
out_unlock:
mutex_unlock(&pinfo->lock);
return ret;
}
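/*
 * Illustrative sketch only, not part of the upstream driver: the CRIU restore
 * sequence as seen from this file. KFD first blocks MMU notifications, then
 * recreates userptr BOs with criu_resume set, and finally calls
 * amdgpu_amdkfd_criu_resume() to schedule validation and restart the queues.
 * The helper name kfd_example_criu_restore_flow() is hypothetical.
 */
static inline int kfd_example_criu_restore_flow(void *process_info)
{
	amdgpu_amdkfd_block_mmu_notifications(process_info);

	/* ... amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(..., criu_resume = true) ... */

	return amdgpu_amdkfd_criu_resume(process_info);
}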
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint8_t xcp_id)
{
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
ssize_t available;
uint64_t vram_available, system_mem_available, ttm_mem_available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
- adev->kfd.vram_used_aligned[xcp_id]
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
if (adev->gmc.is_app_apu) {
system_mem_available = no_system_mem_limit ?
kfd_mem_limit.max_system_mem_limit :
kfd_mem_limit.max_system_mem_limit -
kfd_mem_limit.system_mem_used;
ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
kfd_mem_limit.ttm_mem_used;
available = min3(system_mem_available, ttm_mem_available,
vram_available);
available = ALIGN_DOWN(available, PAGE_SIZE);
} else {
available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
}
spin_unlock(&kfd_mem_limit.mem_limit_lock);
if (available < 0)
available = 0;
return available;
}
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_device *adev, uint64_t va, uint64_t size,
void *drm_priv, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags, bool criu_resume)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
enum ttm_bo_type bo_type = ttm_bo_type_device;
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
uint64_t aligned_size;
int8_t xcp_id = -1;
u64 alloc_flags;
int ret;
/*
* Check on which domain to allocate BO
*/
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->gmc.is_app_apu) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
} else {
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
}
xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
0 : fpriv->xcp_id;
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
} else {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
if (!offset || !*offset)
return -EINVAL;
user_addr = untagged_addr(*offset);
} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
bo_type = ttm_bo_type_sg;
if (size > UINT_MAX)
return -EINVAL;
sg = create_sg_table(*offset, size);
if (!sg)
return -ENOMEM;
} else {
return -EINVAL;
}
}
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
if (!*mem) {
ret = -ENOMEM;
goto err;
}
INIT_LIST_HEAD(&(*mem)->attachments);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
/* Workaround for AQL queue wraparound bug. Map the same
* memory twice. That means we only actually allocate half
* the memory.
*/
if ((*mem)->aql_queue)
size >>= 1;
aligned_size = PAGE_ALIGN(size);
(*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
xcp_id);
if (ret) {
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
va, (*mem)->aql_queue ? size << 1 : size,
domain_string(alloc_domain), xcp_id);
ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
bo_type, NULL, &gobj, xcp_id + 1);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret);
goto err_bo_create;
}
ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
if (ret) {
pr_debug("Failed to allow vma node access. ret %d\n", ret);
goto err_node_allow;
}
bo = gem_to_amdgpu_bo(gobj);
if (bo_type == ttm_bo_type_sg) {
bo->tbo.sg = sg;
bo->tbo.ttm->sg = sg;
}
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
(*mem)->va = va;
(*mem)->domain = domain;
(*mem)->mapped_to_gpu_memory = 0;
(*mem)->process_info = avm->process_info;
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
ret = init_user_pages(*mem, user_addr, criu_resume);
if (ret)
goto allocate_init_user_pages_failed;
} else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
goto err_pin_bo;
}
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
}
if (offset)
*offset = amdgpu_bo_mmap_offset(bo);
return 0;
allocate_init_user_pages_failed:
err_pin_bo:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
else
kfree(*mem);
err:
if (sg) {
sg_free_table(sg);
kfree(sg);
}
return ret;
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.base.size;
bool use_release_notifier = (mem->bo->kfd_bo == mem);
struct kfd_mem_attachment *entry, *tmp;
struct bo_vm_reservation_context ctx;
unsigned int mapped_to_gpu_memory;
int ret;
bool is_imported = false;
mutex_lock(&mem->lock);
/* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
if (mem->alloc_flags &
(KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
}
mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
is_imported = mem->is_imported;
mutex_unlock(&mem->lock);
/* lock is not needed after this, since mem is unused and will
* be freed anyway
*/
if (mapped_to_gpu_memory > 0) {
pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
mem->va, bo_size);
return -EBUSY;
}
/* Make sure restore workers don't access the BO any more */
mutex_lock(&process_info->lock);
list_del(&mem->validate_list);
mutex_unlock(&process_info->lock);
/* Cleanup user pages and MMU notifiers */
if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
amdgpu_hmm_unregister(mem->bo);
mutex_lock(&process_info->notifier_lock);
amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
mutex_unlock(&process_info->notifier_lock);
}
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
return ret;
/* The eviction fence should be removed by the last unmap.
* TODO: Log an error condition if the bo still has the eviction fence
* attached
*/
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence);
pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
mem->va + bo_size * (1 + mem->aql_queue));
/* Remove from VM internal data structures */
list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
kfd_mem_detach(entry);
ret = unreserve_bo_and_vms(&ctx, false, false);
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
/* If the SG is not NULL, it's one we created for a doorbell or mmio
* remap BO. We need to free it.
*/
if (mem->bo->tbo.sg) {
sg_free_table(mem->bo->tbo.sg);
kfree(mem->bo->tbo.sg);
}
/* Update the size of the BO being freed if it was allocated from
 * VRAM and is not imported. For APP APUs, VRAM allocations are done
 * in the GTT domain.
 */
if (size) {
if (!is_imported &&
(mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
(adev->gmc.is_app_apu &&
mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
*size = bo_size;
else
*size = 0;
}
/* Free the BO */
drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
if (mem->dmabuf)
dma_buf_put(mem->dmabuf);
mutex_destroy(&mem->lock);
/* If this releases the last reference, it will end up calling
* amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
* this needs to be the last call here.
*/
drm_gem_object_put(&mem->bo->tbo.base);
/*
* For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
* explicitly free it here.
*/
if (!use_release_notifier)
kfree(mem);
return ret;
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem,
void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
int ret;
struct amdgpu_bo *bo;
uint32_t domain;
struct kfd_mem_attachment *entry;
struct bo_vm_reservation_context ctx;
unsigned long bo_size;
bool is_invalid_userptr = false;
bo = mem->bo;
if (!bo) {
pr_err("Invalid BO when mapping memory to GPU\n");
return -EINVAL;
}
/* Make sure restore is not running concurrently. Since we
* don't map invalid userptr BOs, we rely on the next restore
* worker to do the mapping
*/
mutex_lock(&mem->process_info->lock);
/* Lock notifier lock. If we find an invalid userptr BO, we can be
* sure that the MMU notifier is no longer running
* concurrently and the queues are actually stopped
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
mutex_lock(&mem->process_info->notifier_lock);
is_invalid_userptr = !!mem->invalid;
mutex_unlock(&mem->process_info->notifier_lock);
}
mutex_lock(&mem->lock);
domain = mem->domain;
bo_size = bo->tbo.base.size;
pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
mem->va,
mem->va + bo_size * (1 + mem->aql_queue),
avm, domain_string(domain));
if (!kfd_mem_is_attached(avm, mem)) {
ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
if (ret)
goto out;
}
ret = reserve_bo_and_vm(mem, avm, &ctx);
if (unlikely(ret))
goto out;
/* Userptr can be marked as "not invalid", but not actually be
* validated yet (still in the system domain). In that case
* the queues are still stopped and we can leave mapping for
* the next restore worker
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
ret = vm_validate_pt_pd_bos(avm);
if (unlikely(ret))
goto out_unreserve;
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
/* Validate BO only once. The eviction fence gets added to BO
* the first time it is mapped. Validate will wait for all
* background evictions to complete.
*/
ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
if (ret) {
pr_debug("Validate failed\n");
goto out_unreserve;
}
}
list_for_each_entry(entry, &mem->attachments, list) {
if (entry->bo_va->base.vm != avm || entry->is_mapped)
continue;
pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
entry->va, entry->va + bo_size, entry);
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
is_invalid_userptr);
if (ret) {
pr_err("Failed to map bo to gpuvm\n");
goto out_unreserve;
}
ret = vm_update_pds(avm, ctx.sync);
if (ret) {
pr_err("Failed to update page directories\n");
goto out_unreserve;
}
entry->is_mapped = true;
mem->mapped_to_gpu_memory++;
pr_debug("\t INC mapping count %d\n",
mem->mapped_to_gpu_memory);
}
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
dma_resv_add_fence(bo->tbo.base.resv,
&avm->process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
ret = unreserve_bo_and_vms(&ctx, false, false);
goto out;
out_unreserve:
unreserve_bo_and_vms(&ctx, false, false);
out:
mutex_unlock(&mem->process_info->lock);
mutex_unlock(&mem->lock);
return ret;
}
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct amdkfd_process_info *process_info = avm->process_info;
unsigned long bo_size = mem->bo->tbo.base.size;
struct kfd_mem_attachment *entry;
struct bo_vm_reservation_context ctx;
int ret;
mutex_lock(&mem->lock);
ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
if (unlikely(ret))
goto out;
/* If no VMs were reserved, it means the BO wasn't actually mapped */
if (ctx.n_vms == 0) {
ret = -EINVAL;
goto unreserve_out;
}
ret = vm_validate_pt_pd_bos(avm);
if (unlikely(ret))
goto unreserve_out;
pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
mem->va,
mem->va + bo_size * (1 + mem->aql_queue),
avm);
list_for_each_entry(entry, &mem->attachments, list) {
if (entry->bo_va->base.vm != avm || !entry->is_mapped)
continue;
pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
entry->va, entry->va + bo_size, entry);
unmap_bo_from_gpuvm(mem, entry, ctx.sync);
entry->is_mapped = false;
mem->mapped_to_gpu_memory--;
pr_debug("\t DEC mapping count %d\n",
mem->mapped_to_gpu_memory);
}
/* If BO is unmapped from all VMs, unfence it. It can be evicted if
* required.
*/
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
!mem->bo->tbo.pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence);
unreserve_out:
unreserve_bo_and_vms(&ctx, false, false);
out:
mutex_unlock(&mem->lock);
return ret;
}
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
{
struct amdgpu_sync sync;
int ret;
amdgpu_sync_create(&sync);
mutex_lock(&mem->lock);
amdgpu_sync_clone(&mem->sync, &sync);
mutex_unlock(&mem->lock);
ret = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
return ret;
}
/**
* amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
* @adev: Device to which allocated BO belongs
* @bo: Buffer object to be mapped
*
* Before return, bo reference count is incremented. To release the reference and unpin/
* unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
*/
int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo)
{
int ret;
ret = amdgpu_bo_reserve(bo, true);
if (ret) {
pr_err("Failed to reserve bo. ret %d\n", ret);
goto err_reserve_bo_failed;
}
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
pr_err("Failed to pin bo. ret %d\n", ret);
goto err_pin_bo_failed;
}
ret = amdgpu_ttm_alloc_gart(&bo->tbo);
if (ret) {
pr_err("Failed to bind bo to GART. ret %d\n", ret);
goto err_map_bo_gart_failed;
}
amdgpu_amdkfd_remove_eviction_fence(
bo, bo->vm_bo->vm->process_info->eviction_fence);
amdgpu_bo_unreserve(bo);
bo = amdgpu_bo_ref(bo);
return 0;
err_map_bo_gart_failed:
amdgpu_bo_unpin(bo);
err_pin_bo_failed:
amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:
return ret;
}
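/*
 * Illustrative sketch only, not part of the upstream driver: typical use of
 * amdgpu_amdkfd_map_gtt_bo_to_gart() for a BO that firmware must reach
 * through GART. The helper name kfd_example_map_bo_to_gart() is hypothetical;
 * releasing the reference via amdgpu_amdkfd_free_gtt_mem() is left to the
 * caller, as described above.
 */
static inline int kfd_example_map_bo_to_gart(struct amdgpu_device *adev,
					     struct amdgpu_bo *bo,
					     uint64_t *gpu_addr)
{
	int ret;

	ret = amdgpu_amdkfd_map_gtt_bo_to_gart(adev, bo);
	if (ret)
		return ret;

	/* GART address of the now pinned and mapped BO */
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	return 0;
}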
/** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
*
* @mem: Buffer object to be mapped for CPU access
* @kptr[out]: pointer in kernel CPU address space
* @size[out]: size of the buffer
*
* Pins the BO and maps it for kernel CPU access. The eviction fence is removed
* from the BO, since pinned BOs cannot be evicted. The bo must remain on the
* validate_list, so the GPU mapping can be restored after a page table was
* evicted.
*
* Return: 0 on success, error code on failure
*/
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
void **kptr, uint64_t *size)
{
int ret;
struct amdgpu_bo *bo = mem->bo;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
pr_err("userptr can't be mapped to kernel\n");
return -EINVAL;
}
mutex_lock(&mem->process_info->lock);
ret = amdgpu_bo_reserve(bo, true);
if (ret) {
pr_err("Failed to reserve bo. ret %d\n", ret);
goto bo_reserve_failed;
}
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
pr_err("Failed to pin bo. ret %d\n", ret);
goto pin_failed;
}
ret = amdgpu_bo_kmap(bo, kptr);
if (ret) {
pr_err("Failed to map bo to kernel. ret %d\n", ret);
goto kmap_failed;
}
amdgpu_amdkfd_remove_eviction_fence(
bo, mem->process_info->eviction_fence);
if (size)
*size = amdgpu_bo_size(bo);
amdgpu_bo_unreserve(bo);
mutex_unlock(&mem->process_info->lock);
return 0;
kmap_failed:
amdgpu_bo_unpin(bo);
pin_failed:
amdgpu_bo_unreserve(bo);
bo_reserve_failed:
mutex_unlock(&mem->process_info->lock);
return ret;
}
/** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
*
* @mem: Buffer object to be unmapped for CPU access
*
* Removes the kernel CPU mapping and unpins the BO. It does not restore the
* eviction fence, so this function should only be used for cleanup before the
* BO is destroyed.
*/
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
struct amdgpu_bo *bo = mem->bo;
amdgpu_bo_reserve(bo, true);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
}
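/*
 * Illustrative sketch only, not part of the upstream driver: pairing of the
 * kernel CPU map and unmap helpers above. The helper name
 * kfd_example_cpu_access() and the memset() access are hypothetical; the
 * mapped pointer is only valid until the matching unmap call.
 */
static inline int kfd_example_cpu_access(struct kgd_mem *mem)
{
	uint64_t size;
	void *kptr;
	int ret;

	ret = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kptr, &size);
	if (ret)
		return ret;

	memset(kptr, 0, size);	/* example CPU access through the mapping */

	amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
	return 0;
}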
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *mem)
{
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
mb(); /* make sure read happened */
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
}
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
struct dma_buf *dma_buf,
uint64_t va, void *drm_priv,
struct kgd_mem **mem, uint64_t *size,
uint64_t *mmap_offset)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
struct drm_gem_object *obj;
struct amdgpu_bo *bo;
int ret;
obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
if (IS_ERR(obj))
return PTR_ERR(obj);
bo = gem_to_amdgpu_bo(obj);
if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT))) {
/* Only VRAM and GTT BOs are supported */
ret = -EINVAL;
goto err_put_obj;
}
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
if (!*mem) {
ret = -ENOMEM;
goto err_put_obj;
}
ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
if (ret)
goto err_free_mem;
if (size)
*size = amdgpu_bo_size(bo);
if (mmap_offset)
*mmap_offset = amdgpu_bo_mmap_offset(bo);
INIT_LIST_HEAD(&(*mem)->attachments);
mutex_init(&(*mem)->lock);
(*mem)->alloc_flags =
((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
get_dma_buf(dma_buf);
(*mem)->dmabuf = dma_buf;
(*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) && !adev->gmc.is_app_apu ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
(*mem)->mapped_to_gpu_memory = 0;
(*mem)->process_info = avm->process_info;
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
amdgpu_sync_create(&(*mem)->sync);
(*mem)->is_imported = true;
return 0;
err_free_mem:
kfree(*mem);
err_put_obj:
drm_gem_object_put(obj);
return ret;
}
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
struct dma_buf **dma_buf)
{
int ret;
mutex_lock(&mem->lock);
ret = kfd_mem_export_dmabuf(mem);
if (ret)
goto out;
get_dma_buf(mem->dmabuf);
*dma_buf = mem->dmabuf;
out:
mutex_unlock(&mem->lock);
return ret;
}
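/*
 * Illustrative sketch only, not part of the upstream driver: exporting a KFD
 * BO as a dma-buf and importing it on a peer device. The helper name
 * kfd_example_share_bo() and its parameters are hypothetical; the exported
 * reference is dropped with dma_buf_put() once the import holds its own.
 */
static inline int kfd_example_share_bo(struct amdgpu_device *importer,
				       struct kgd_mem *mem, uint64_t va,
				       void *drm_priv, struct kgd_mem **peer_mem)
{
	struct dma_buf *dmabuf;
	int ret;

	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	if (ret)
		return ret;

	ret = amdgpu_amdkfd_gpuvm_import_dmabuf(importer, dmabuf, va, drm_priv,
						peer_mem, NULL, NULL);
	dma_buf_put(dmabuf);
	return ret;
}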
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
* cannot do any memory allocations, and cannot take any locks that
* are held elsewhere while allocating memory.
*
* It doesn't do anything to the BO itself. The real work happens in
* restore, where we get updated page addresses. This function only
* ensures that GPU access to the BO is stopped.
*/
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
unsigned long cur_seq, struct kgd_mem *mem)
{
struct amdkfd_process_info *process_info = mem->process_info;
int r = 0;
/* Do not process MMU notifications during CRIU restore until
* KFD_CRIU_OP_RESUME IOCTL is received
*/
if (READ_ONCE(process_info->block_mmu_notifications))
return 0;
mutex_lock(&process_info->notifier_lock);
mmu_interval_set_seq(mni, cur_seq);
mem->invalid++;
if (++process_info->evicted_bos == 1) {
/* First eviction, stop the queues */
r = kgd2kfd_quiesce_mm(mni->mm,
KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
if (r)
pr_err("Failed to quiesce KFD\n");
schedule_delayed_work(&process_info->restore_userptr_work,
msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}
mutex_unlock(&process_info->notifier_lock);
return r;
}
/* Update invalid userptr BOs
*
* Moves invalidated (evicted) userptr BOs from userptr_valid_list to
* userptr_inval_list and updates user pages for all BOs that have
* been invalidated since their last update.
*/
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
struct mm_struct *mm)
{
struct kgd_mem *mem, *tmp_mem;
struct amdgpu_bo *bo;
struct ttm_operation_ctx ctx = { false, false };
uint32_t invalid;
int ret = 0;
mutex_lock(&process_info->notifier_lock);
/* Move all invalidated BOs to the userptr_inval_list */
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_valid_list,
validate_list)
if (mem->invalid)
list_move_tail(&mem->validate_list,
&process_info->userptr_inval_list);
/* Go through userptr_inval_list and update any invalid user_pages */
list_for_each_entry(mem, &process_info->userptr_inval_list,
validate_list) {
invalid = mem->invalid;
if (!invalid)
/* BO hasn't been invalidated since the last
* revalidation attempt. Keep its page list.
*/
continue;
bo = mem->bo;
amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
mem->range = NULL;
/* BO reservations and getting user pages (hmm_range_fault)
* must happen outside the notifier lock
*/
mutex_unlock(&process_info->notifier_lock);
/* Move the BO to system (CPU) domain if necessary to unmap
* and free the SG table
*/
if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
if (amdgpu_bo_reserve(bo, true))
return -EAGAIN;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (ret) {
pr_err("%s: Failed to invalidate userptr BO\n",
__func__);
return -EAGAIN;
}
}
/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
&mem->range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
/* Treat -EFAULT (bad address) as success. The mapping will
 * fail later with a VM fault if the GPU tries to access
 * it. That is better than hanging indefinitely with stalled
 * user mode queues.
 *
 * Return other errors such as -EBUSY or -ENOMEM to retry the restore
 */
if (ret != -EFAULT)
return ret;
ret = 0;
}
mutex_lock(&process_info->notifier_lock);
/* Mark the BO as valid unless it was invalidated
* again concurrently.
*/
if (mem->invalid != invalid) {
ret = -EAGAIN;
goto unlock_out;
}
/* set mem valid if mem has hmm range associated */
if (mem->range)
mem->invalid = 0;
}
unlock_out:
mutex_unlock(&process_info->notifier_lock);
return ret;
}
/* Validate invalid userptr BOs
*
* Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
* with new page addresses and waits for the page table updates to complete.
*/
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_sync sync;
struct drm_exec exec;
struct amdgpu_vm *peer_vm;
struct kgd_mem *mem, *tmp_mem;
struct amdgpu_bo *bo;
int ret;
amdgpu_sync_create(&sync);
drm_exec_init(&exec, 0);
/* Reserve all BOs and page tables for validation */
drm_exec_until_all_locked(&exec) {
/* Reserve all the page directories */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto unreserve_out;
}
/* Reserve the userptr_inval_list entries to resv_list */
list_for_each_entry(mem, &process_info->userptr_inval_list,
validate_list) {
struct drm_gem_object *gobj;
gobj = &mem->bo->tbo.base;
ret = drm_exec_prepare_obj(&exec, gobj, 1);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto unreserve_out;
}
}
ret = process_validate_vms(process_info);
if (ret)
goto unreserve_out;
/* Validate BOs and update GPUVM page tables */
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
validate_list) {
struct kfd_mem_attachment *attachment;
bo = mem->bo;
/* Validate the BO if we got user pages */
if (bo->tbo.ttm->pages[0]) {
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret) {
pr_err("%s: failed to validate BO\n", __func__);
goto unreserve_out;
}
}
/* Update mapping. If the BO was not validated
* (because we couldn't get user pages), this will
* clear the page table entries, which will result in
* VM faults if the GPU tries to access the invalid
* memory.
*/
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
mutex_lock(&process_info->notifier_lock);
mem->invalid++;
mutex_unlock(&process_info->notifier_lock);
goto unreserve_out;
}
}
}
/* Update page directories */
ret = process_update_pds(process_info, &sync);
unreserve_out:
drm_exec_fini(&exec);
amdgpu_sync_wait(&sync, false);
amdgpu_sync_free(&sync);
return ret;
}
/* Confirm that all user pages are valid while holding the notifier lock
*
* Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
*/
static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
{
struct kgd_mem *mem, *tmp_mem;
int ret = 0;
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
validate_list) {
bool valid;
/* Keep mem without an hmm range on the userptr_inval_list */
if (!mem->range)
continue;
/* Only check mem with hmm range associated */
valid = amdgpu_ttm_tt_get_user_pages_done(
mem->bo->tbo.ttm, mem->range);
mem->range = NULL;
if (!valid) {
WARN(!mem->invalid, "Invalid BO not marked invalid");
ret = -EAGAIN;
continue;
}
if (mem->invalid) {
WARN(1, "Valid BO is marked invalid");
ret = -EAGAIN;
continue;
}
list_move_tail(&mem->validate_list,
&process_info->userptr_valid_list);
}
return ret;
}
/* Worker callback to restore evicted userptr BOs
*
* Tries to update and validate all userptr BOs. If successful and no
* concurrent evictions happened, the queues are restarted. Otherwise,
* reschedule for another attempt later.
*/
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct amdkfd_process_info *process_info =
container_of(dwork, struct amdkfd_process_info,
restore_userptr_work);
struct task_struct *usertask;
struct mm_struct *mm;
uint32_t evicted_bos;
mutex_lock(&process_info->notifier_lock);
evicted_bos = process_info->evicted_bos;
mutex_unlock(&process_info->notifier_lock);
if (!evicted_bos)
return;
/* Reference task and mm in case of concurrent process termination */
usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
if (!usertask)
return;
mm = get_task_mm(usertask);
if (!mm) {
put_task_struct(usertask);
return;
}
mutex_lock(&process_info->lock);
if (update_invalid_user_pages(process_info, mm))
goto unlock_out;
/* userptr_inval_list can be empty if all evicted userptr BOs
* have been freed. In that case there is nothing to validate
* and we can just restart the queues.
*/
if (!list_empty(&process_info->userptr_inval_list)) {
if (validate_invalid_user_pages(process_info))
goto unlock_out;
}
/* Final check for concurrent eviction and atomic update. If
* another eviction happens after successful update, it will
* be a first eviction that calls quiesce_mm. The eviction
* reference counting inside KFD will handle this case.
*/
mutex_lock(&process_info->notifier_lock);
if (process_info->evicted_bos != evicted_bos)
goto unlock_notifier_out;
if (confirm_valid_user_pages_locked(process_info)) {
WARN(1, "User pages unexpectedly invalid");
goto unlock_notifier_out;
}
process_info->evicted_bos = evicted_bos = 0;
if (kgd2kfd_resume_mm(mm)) {
pr_err("%s: Failed to resume KFD\n", __func__);
/* No recovery from this failure. Probably the CP is
* hanging. No point trying again.
*/
}
unlock_notifier_out:
mutex_unlock(&process_info->notifier_lock);
unlock_out:
mutex_unlock(&process_info->lock);
/* If validation failed, reschedule another attempt */
if (evicted_bos) {
schedule_delayed_work(&process_info->restore_userptr_work,
msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
kfd_smi_event_queue_restore_rescheduled(mm);
}
mmput(mm);
put_task_struct(usertask);
}
/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
* KFD process identified by process_info
*
* @process_info: amdkfd_process_info of the KFD process
*
* After memory eviction, the restore thread calls this function. The function
* should be called while the process is still valid. BO restore involves:
*
* 1. Release the old eviction fence and create a new one
* 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
* 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
* BOs that need to be reserved.
* 4. Reserve all the BOs
* 5. Validate the PD and PT BOs.
* 6. Validate all KFD BOs using kfd_bo_list, map them and add the new fence
* 7. Add the fence to all PD and PT BOs.
* 8. Unreserve all BOs
*/
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
struct amdkfd_process_info *process_info = info;
struct amdgpu_vm *peer_vm;
struct kgd_mem *mem;
struct amdgpu_amdkfd_fence *new_fence;
struct list_head duplicate_save;
struct amdgpu_sync sync_obj;
unsigned long failed_size = 0;
unsigned long total_size = 0;
struct drm_exec exec;
int ret;
INIT_LIST_HEAD(&duplicate_save);
mutex_lock(&process_info->lock);
drm_exec_init(&exec, 0);
drm_exec_until_all_locked(&exec) {
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto ttm_reserve_fail;
}
/* Reserve all BOs and page tables/directory. Add all BOs from
* kfd_bo_list to ctx.list
*/
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct drm_gem_object *gobj;
gobj = &mem->bo->tbo.base;
ret = drm_exec_prepare_obj(&exec, gobj, 1);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto ttm_reserve_fail;
}
}
amdgpu_sync_create(&sync_obj);
/* Validate PDs and PTs */
ret = process_validate_vms(process_info);
if (ret)
goto validate_map_fail;
ret = process_sync_pds_resv(process_info, &sync_obj);
if (ret) {
pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
goto validate_map_fail;
}
/* Validate BOs and map them to GPUVM (update VM page tables). */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
total_size += amdgpu_bo_size(bo);
ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
if (ret) {
pr_debug("Memory eviction: Validate BOs failed\n");
failed_size += amdgpu_bo_size(bo);
ret = amdgpu_amdkfd_bo_validate(bo,
AMDGPU_GEM_DOMAIN_GTT, false);
if (ret) {
pr_debug("Memory eviction: Try again\n");
goto validate_map_fail;
}
}
dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
DMA_RESV_USAGE_KERNEL, fence) {
ret = amdgpu_sync_fence(&sync_obj, fence);
if (ret) {
pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
goto validate_map_fail;
}
}
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
if (attachment->bo_va->base.bo->tbo.pin_count)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
goto validate_map_fail;
}
}
}
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
/* Update page directories */
ret = process_update_pds(process_info, &sync_obj);
if (ret) {
pr_debug("Memory eviction: update PDs failed. Try again\n");
goto validate_map_fail;
}
/* Wait for validate and PT updates to finish */
amdgpu_sync_wait(&sync_obj, false);
/* Release old eviction fence and create new one, because fence only
* goes from unsignaled to signaled, fence cannot be reused.
* Use context and mm from the old fence.
*/
new_fence = amdgpu_amdkfd_fence_create(
process_info->eviction_fence->base.context,
process_info->eviction_fence->mm,
NULL);
if (!new_fence) {
pr_err("Failed to create eviction fence\n");
ret = -ENOMEM;
goto validate_map_fail;
}
dma_fence_put(&process_info->eviction_fence->base);
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
/* Attach new eviction fence to all BOs except pinned ones */
list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
if (mem->bo->tbo.pin_count)
continue;
dma_resv_add_fence(mem->bo->tbo.base.resv,
&process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
}
/* Attach eviction fence to PD / PT BOs */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
struct amdgpu_bo *bo = peer_vm->root.bo;
dma_resv_add_fence(bo->tbo.base.resv,
&process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
}
validate_map_fail:
amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
drm_exec_fini(&exec);
mutex_unlock(&process_info->lock);
return ret;
}
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
int ret;
if (!info || !gws)
return -EINVAL;
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
if (!*mem)
return -ENOMEM;
mutex_init(&(*mem)->lock);
INIT_LIST_HEAD(&(*mem)->attachments);
(*mem)->bo = amdgpu_bo_ref(gws_bo);
(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
(*mem)->process_info = process_info;
add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
amdgpu_sync_create(&(*mem)->sync);
/* Validate gws bo the first time it is added to process */
mutex_lock(&(*mem)->process_info->lock);
ret = amdgpu_bo_reserve(gws_bo, false);
if (unlikely(ret)) {
pr_err("Reserve gws bo failed %d\n", ret);
goto bo_reservation_failure;
}
ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
if (ret) {
pr_err("GWS BO validate failed %d\n", ret);
goto bo_validation_failure;
}
/* The GWS resource is shared between amdgpu and amdkfd.
 * Add the process eviction fence to the BO so they can
 * evict each other.
 */
ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
dma_resv_add_fence(gws_bo->tbo.base.resv,
&process_info->eviction_fence->base,
DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(gws_bo);
mutex_unlock(&(*mem)->process_info->lock);
return ret;
reserve_shared_fail:
bo_validation_failure:
amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
mutex_unlock(&(*mem)->process_info->lock);
amdgpu_sync_free(&(*mem)->sync);
remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
amdgpu_bo_unref(&gws_bo);
mutex_destroy(&(*mem)->lock);
kfree(*mem);
*mem = NULL;
return ret;
}
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
int ret;
struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
struct amdgpu_bo *gws_bo = kgd_mem->bo;
/* Remove BO from process's validate list so restore worker won't touch
* it anymore
*/
remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
ret = amdgpu_bo_reserve(gws_bo, false);
if (unlikely(ret)) {
pr_err("Reserve gws bo failed %d\n", ret);
//TODO add BO back to validate_list?
return ret;
}
amdgpu_amdkfd_remove_eviction_fence(gws_bo,
process_info->eviction_fence);
amdgpu_bo_unreserve(gws_bo);
amdgpu_sync_free(&kgd_mem->sync);
amdgpu_bo_unref(&gws_bo);
mutex_destroy(&kgd_mem->lock);
kfree(mem);
return 0;
}
/* Returns GPU-specific tiling mode information */
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
struct tile_config *config)
{
config->gb_addr_config = adev->gfx.config.gb_addr_config;
config->tile_config_ptr = adev->gfx.config.tile_mode_array;
config->num_tile_configs =
ARRAY_SIZE(adev->gfx.config.tile_mode_array);
config->macro_tile_config_ptr =
adev->gfx.config.macrotile_mode_array;
config->num_macro_tile_configs =
ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
/* Those values are not set from GFX9 onwards */
config->num_banks = adev->gfx.config.num_banks;
config->num_ranks = adev->gfx.config.num_ranks;
return 0;
}
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct kfd_mem_attachment *entry;
list_for_each_entry(entry, &mem->attachments, list) {
if (entry->is_mapped && entry->adev == adev)
return true;
}
return false;
}
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
{
spin_lock(&kfd_mem_limit.mem_limit_lock);
seq_printf(m, "System mem used %lldM out of %lluM\n",
(kfd_mem_limit.system_mem_used >> 20),
(kfd_mem_limit.max_system_mem_limit >> 20));
seq_printf(m, "TTM mem used %lldM out of %lluM\n",
(kfd_mem_limit.ttm_mem_used >> 20),
(kfd_mem_limit.max_ttm_mem_limit >> 20));
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return 0;
}
#endif
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c |
// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Christian König <[email protected]>
*/
#include <linux/dma-fence-chain.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
struct amdgpu_sync_entry {
struct hlist_node node;
struct dma_fence *fence;
};
static struct kmem_cache *amdgpu_sync_slab;
/**
* amdgpu_sync_create - zero init sync object
*
* @sync: sync object to initialize
*
* Just clear the sync object for now.
*/
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
hash_init(sync->fences);
}
/**
* amdgpu_sync_same_dev - test if a fence belongs to us
*
* @adev: amdgpu device to use for the test
* @f: fence to test
*
* Test if the fence was issued by us.
*/
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
struct dma_fence *f)
{
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence) {
struct amdgpu_ring *ring;
ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
return ring->adev == adev;
}
return false;
}
/**
* amdgpu_sync_get_owner - extract the owner of a fence
*
* @f: fence to get the owner from
*
* Extract who originally created the fence.
*/
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
struct drm_sched_fence *s_fence;
struct amdgpu_amdkfd_fence *kfd_fence;
if (!f)
return AMDGPU_FENCE_OWNER_UNDEFINED;
s_fence = to_drm_sched_fence(f);
if (s_fence)
return s_fence->owner;
kfd_fence = to_amdgpu_amdkfd_fence(f);
if (kfd_fence)
return AMDGPU_FENCE_OWNER_KFD;
return AMDGPU_FENCE_OWNER_UNDEFINED;
}
/**
* amdgpu_sync_keep_later - Keep the later fence
*
* @keep: existing fence to test
* @fence: new fence
*
* Either keep the existing fence or the new one, depending on which one is later.
*/
static void amdgpu_sync_keep_later(struct dma_fence **keep,
struct dma_fence *fence)
{
if (*keep && dma_fence_is_later(*keep, fence))
return;
dma_fence_put(*keep);
*keep = dma_fence_get(fence);
}
/**
* amdgpu_sync_add_later - add the fence to the hash
*
* @sync: sync object to add the fence to
* @f: fence to add
*
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
hash_for_each_possible(sync->fences, e, node, f->context) {
if (unlikely(e->fence->context != f->context))
continue;
amdgpu_sync_keep_later(&e->fence, f);
return true;
}
return false;
}
/**
* amdgpu_sync_fence - remember to sync to this fence
*
* @sync: sync object to add fence to
* @f: fence to sync to
*
* Add the fence to the sync object.
*/
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
struct amdgpu_sync_entry *e;
if (!f)
return 0;
if (amdgpu_sync_add_later(sync, f))
return 0;
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!e)
return -ENOMEM;
hash_add(sync->fences, &e->node, f->context);
e->fence = dma_fence_get(f);
return 0;
}
/* Determine based on the owner and mode if we should sync to a fence or not */
static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
enum amdgpu_sync_mode mode,
void *owner, struct dma_fence *f)
{
void *fence_owner = amdgpu_sync_get_owner(f);
/* Always sync to moves, no matter what */
if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED)
return true;
/* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise.
*/
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
return false;
/* Never sync to VM updates either. */
if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
return false;
/* Ignore fences depending on the sync mode */
switch (mode) {
case AMDGPU_SYNC_ALWAYS:
return true;
case AMDGPU_SYNC_NE_OWNER:
if (amdgpu_sync_same_dev(adev, f) &&
fence_owner == owner)
return false;
break;
case AMDGPU_SYNC_EQ_OWNER:
if (amdgpu_sync_same_dev(adev, f) &&
fence_owner != owner)
return false;
break;
case AMDGPU_SYNC_EXPLICIT:
return false;
}
WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
"Adding eviction fence to sync obj");
return true;
}
/**
* amdgpu_sync_resv - sync to a reservation object
*
* @adev: amdgpu device
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
* @mode: how owner affects which fences we sync to
* @owner: owner of the planned job submission
*
* Sync to all fences in the reservation object that match the given sync
* mode and owner.
*/
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner)
{
struct dma_resv_iter cursor;
struct dma_fence *f;
int r;
if (resv == NULL)
return -EINVAL;
/* TODO: Use DMA_RESV_USAGE_READ here */
dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
r = amdgpu_sync_fence(sync, f);
dma_fence_put(f);
if (r)
return r;
break;
}
}
}
return 0;
}
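/*
* Illustrative sketch (an assumption, not taken from the driver): a caller
* preparing a submission might gather the fences of a reservation object
* like this, where "sync", "resv" and "owner" are hypothetical variables.
*
*	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
*	if (r)
*		goto error;
*/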
/* Free the entry back to the slab */
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
hash_del(&e->node);
dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
/**
* amdgpu_sync_peek_fence - get the next fence not signaled yet
*
* @sync: the sync object
* @ring: optional ring to use for the test
*
* Returns the next fence not signaled yet without removing it from the sync
* object.
*/
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct dma_fence *f = e->fence;
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (dma_fence_is_signaled(f)) {
amdgpu_sync_entry_free(e);
continue;
}
if (ring && s_fence) {
/* For fences from the same ring it is sufficient
* when they are scheduled.
*/
if (s_fence->sched == &ring->sched) {
if (dma_fence_is_signaled(&s_fence->scheduled))
continue;
return &s_fence->scheduled;
}
}
return f;
}
return NULL;
}
/**
* amdgpu_sync_get_fence - get the next fence from the sync object
*
* @sync: sync object to use
*
* Gets and removes the next fence from the sync object that is not signaled
* yet. The caller takes ownership of the returned reference and must release
* it with dma_fence_put().
*/
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
struct dma_fence *f;
int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e);
if (!dma_fence_is_signaled(f))
return f;
dma_fence_put(f);
}
return NULL;
}
/**
* amdgpu_sync_clone - clone a sync object
*
* @source: sync object to clone
* @clone: pointer to destination sync object
*
* Adds references to all unsignaled fences in @source to @clone. Also
* removes signaled fences from @source while at it.
*/
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
struct dma_fence *f;
int i, r;
hash_for_each_safe(source->fences, i, tmp, e, node) {
f = e->fence;
if (!dma_fence_is_signaled(f)) {
r = amdgpu_sync_fence(clone, f);
if (r)
return r;
} else {
amdgpu_sync_entry_free(e);
}
}
return 0;
}
/**
* amdgpu_sync_push_to_job - push fences into job
* @sync: sync object to get the fences from
* @job: job to push the fences into
*
* Add all unsignaled fences from @sync as dependencies of @job.
*/
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
struct dma_fence *f;
int i, r;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
if (dma_fence_is_signaled(f)) {
amdgpu_sync_entry_free(e);
continue;
}
dma_fence_get(f);
r = drm_sched_job_add_dependency(&job->base, f);
if (r) {
dma_fence_put(f);
return r;
}
}
return 0;
}
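/**
* amdgpu_sync_wait - wait for all fences in the sync object
*
* @sync: sync object to wait on
* @intr: if true the wait is interruptible
*
* Wait for every fence added to the sync object to signal, freeing the
* entries as they complete.
*/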
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
int i, r;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
r = dma_fence_wait(e->fence, intr);
if (r)
return r;
amdgpu_sync_entry_free(e);
}
return 0;
}
/**
* amdgpu_sync_free - free the sync object
*
* @sync: sync object to use
*
* Free the sync object.
*/
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
unsigned int i;
hash_for_each_safe(sync->fences, i, tmp, e, node)
amdgpu_sync_entry_free(e);
}
/**
* amdgpu_sync_init - init sync object subsystem
*
* Allocate the slab allocator.
*/
int amdgpu_sync_init(void)
{
amdgpu_sync_slab = kmem_cache_create(
"amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!amdgpu_sync_slab)
return -ENOMEM;
return 0;
}
/**
* amdgpu_sync_fini - fini sync object subsystem
*
* Free the slab allocator.
*/
void amdgpu_sync_fini(void)
{
kmem_cache_destroy(amdgpu_sync_slab);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ring.h"
#include "vi.h"
#include "vi_structs.h"
#include "vid.h"
#include "amdgpu_ucode.h"
#include "amdgpu_atombios.h"
#include "atombios_i2c.h"
#include "clearstate_vi.h"
#include "gmc/gmc_8_2_d.h"
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_MEC_HPD_SIZE 4096
#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
#define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x) ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x) ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x) ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x) ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x) ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x) ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)
#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK 0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK 0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK 0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK 0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK 0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK 0x00000020L
/* BPM SERDES CMD */
#define SET_BPM_SERDES_CMD 1
#define CLE_BPM_SERDES_CMD 0
/* BPM Register Address*/
enum {
BPM_REG_CGLS_EN = 0, /* Enable/Disable CGLS */
BPM_REG_CGLS_ON, /* ON/OFF CGLS: shall be controlled by RLC FW */
BPM_REG_CGCG_OVERRIDE, /* Set/Clear CGCG Override */
BPM_REG_MGCG_OVERRIDE, /* Set/Clear MGCG Override */
BPM_REG_FGCG_OVERRIDE, /* Set/Clear FGCG Override */
BPM_REG_FGCG_MAX
};
#define RLC_FormatDirectRegListLength 14
MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");
MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
MODULE_FIRMWARE("amdgpu/fiji_me.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
MODULE_FIRMWARE("amdgpu/vegam_me.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
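/*
* The golden register tables below are programmed via
* amdgpu_device_program_register_sequence() and are laid out as
* {register, and_mask, or_value} triplets: the bits selected by and_mask are
* cleared and or_value is OR'd in, with an and_mask of 0xffffffff simply
* overwriting the register. See the helper for the exact semantics.
*/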
static const u32 golden_settings_tonga_a11[] =
{
mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 tonga_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 tonga_mgcg_cgcg_init[] =
{
mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
static const u32 golden_settings_vegam_a11[] =
{
mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 vegam_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 golden_settings_polaris11_a11[] =
{
mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
mmSQ_CONFIG, 0x07f80000, 0x01180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris11_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
mmSQ_CONFIG, 0x07f80000, 0x07180000,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 polaris10_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 fiji_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
static const u32 golden_settings_fiji_a10[] =
{
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
static const u32 fiji_mgcg_cgcg_init[] =
{
mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
static const u32 golden_settings_iceland_a11[] =
{
mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmDB_DEBUG3, 0xc0000000, 0xc0000000,
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
};
static const u32 iceland_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 iceland_mgcg_cgcg_init[] =
{
mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
};
static const u32 cz_golden_settings_a11[] =
{
mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
};
static const u32 cz_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 cz_mgcg_cgcg_init[] =
{
mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
static const u32 stoney_golden_settings_a11[] =
{
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmGB_GPU_ID, 0x0000000f, 0x00000000,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};
static const u32 stoney_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 stoney_mgcg_cgcg_init[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};
static const char * const sq_edc_source_names[] = {
"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK 0x0000007fL
#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT 0x00000000L
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
uint32_t data;
switch (adev->asic_type) {
case CHIP_TOPAZ:
amdgpu_device_program_register_sequence(adev,
iceland_mgcg_cgcg_init,
ARRAY_SIZE(iceland_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_iceland_a11,
ARRAY_SIZE(golden_settings_iceland_a11));
amdgpu_device_program_register_sequence(adev,
iceland_golden_common_all,
ARRAY_SIZE(iceland_golden_common_all));
break;
case CHIP_FIJI:
amdgpu_device_program_register_sequence(adev,
fiji_mgcg_cgcg_init,
ARRAY_SIZE(fiji_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_fiji_a10,
ARRAY_SIZE(golden_settings_fiji_a10));
amdgpu_device_program_register_sequence(adev,
fiji_golden_common_all,
ARRAY_SIZE(fiji_golden_common_all));
break;
case CHIP_TONGA:
amdgpu_device_program_register_sequence(adev,
tonga_mgcg_cgcg_init,
ARRAY_SIZE(tonga_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_tonga_a11,
ARRAY_SIZE(golden_settings_tonga_a11));
amdgpu_device_program_register_sequence(adev,
tonga_golden_common_all,
ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
golden_settings_vegam_a11,
ARRAY_SIZE(golden_settings_vegam_a11));
amdgpu_device_program_register_sequence(adev,
vegam_golden_common_all,
ARRAY_SIZE(vegam_golden_common_all));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris11_a11,
ARRAY_SIZE(golden_settings_polaris11_a11));
amdgpu_device_program_register_sequence(adev,
polaris11_golden_common_all,
ARRAY_SIZE(polaris11_golden_common_all));
break;
case CHIP_POLARIS10:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris10_a11,
ARRAY_SIZE(golden_settings_polaris10_a11));
amdgpu_device_program_register_sequence(adev,
polaris10_golden_common_all,
ARRAY_SIZE(polaris10_golden_common_all));
data = RREG32_SMC(ixCG_ACLK_CNTL);
data &= ~CG_ACLK_CNTL__ACLK_DIVIDER_MASK;
data |= 0x18 << CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT;
WREG32_SMC(ixCG_ACLK_CNTL, data);
if ((adev->pdev->device == 0x67DF) && (adev->pdev->revision == 0xc7) &&
((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
(adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
(adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1680))) {
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
}
break;
case CHIP_CARRIZO:
amdgpu_device_program_register_sequence(adev,
cz_mgcg_cgcg_init,
ARRAY_SIZE(cz_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
cz_golden_settings_a11,
ARRAY_SIZE(cz_golden_settings_a11));
amdgpu_device_program_register_sequence(adev,
cz_golden_common_all,
ARRAY_SIZE(cz_golden_common_all));
break;
case CHIP_STONEY:
amdgpu_device_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
ARRAY_SIZE(stoney_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
stoney_golden_settings_a11,
ARRAY_SIZE(stoney_golden_settings_a11));
amdgpu_device_program_register_sequence(adev,
stoney_golden_common_all,
ARRAY_SIZE(stoney_golden_common_all));
break;
default:
break;
}
}
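/*
* Basic GFX ring test: seed a scratch register with 0xCAFEDEAD, emit a
* SET_UCONFIG_REG packet that writes 0xDEADBEEF to it and poll until the
* value shows up or the timeout expires.
*/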
static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t tmp = 0;
unsigned i;
int r;
WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSCRATCH_REG0);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
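/*
* IB test: build a small indirect buffer that writes 0xDEADBEEF to a
* writeback slot via WRITE_DATA, wait for the submission fence and verify
* that the value landed in memory.
*/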
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned int index;
uint64_t gpu_addr;
uint32_t tmp;
long r;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
ib.ptr[3] = upper_32_bits(gpu_addr);
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
goto err2;
}
tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
return r;
}
static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
amdgpu_ucode_release(&adev->gfx.pfp_fw);
amdgpu_ucode_release(&adev->gfx.me_fw);
amdgpu_ucode_release(&adev->gfx.ce_fw);
amdgpu_ucode_release(&adev->gfx.rlc_fw);
amdgpu_ucode_release(&adev->gfx.mec_fw);
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ))
amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
int err;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
unsigned int *tmp = NULL, i;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_TOPAZ:
chip_name = "topaz";
break;
case CHIP_TONGA:
chip_name = "tonga";
break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
case CHIP_FIJI:
chip_name = "fiji";
break;
case CHIP_STONEY:
chip_name = "stoney";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
case CHIP_VEGAM:
chip_name = "vegam";
break;
default:
BUG();
}
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
/*
* Support for MCBP/Virtualization in combination with chained IBs was
* formally released with feature version #46
*/
if (adev->gfx.ce_feature_version >= 46 &&
adev->gfx.pfp_feature_version >= 46) {
adev->virt.chained_ib_support = true;
DRM_INFO("Chained IB support enabled!\n");
} else
adev->virt.chained_ib_support = false;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
adev->gfx.rlc.save_and_restore_offset =
le32_to_cpu(rlc_hdr->save_and_restore_offset);
adev->gfx.rlc.clear_state_descriptor_offset =
le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
adev->gfx.rlc.avail_scratch_ram_locations =
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
adev->gfx.rlc.reg_restore_list_size =
le32_to_cpu(rlc_hdr->reg_restore_list_size);
adev->gfx.rlc.reg_list_format_start =
le32_to_cpu(rlc_hdr->reg_list_format_start);
adev->gfx.rlc.reg_list_format_separate_start =
le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
adev->gfx.rlc.starting_offsets_start =
le32_to_cpu(rlc_hdr->starting_offsets_start);
adev->gfx.rlc.reg_list_format_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
adev->gfx.rlc.reg_list_size_bytes =
le32_to_cpu(rlc_hdr->reg_list_size_bytes);
adev->gfx.rlc.register_list_format =
kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
if (!adev->gfx.rlc.register_list_format) {
err = -ENOMEM;
goto out;
}
tmp = (unsigned int *)((uintptr_t)rlc_hdr +
le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
tmp = (unsigned int *)((uintptr_t)rlc_hdr +
le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
if (!err) {
cp_hdr = (const struct gfx_firmware_header_v1_0 *)
adev->gfx.mec2_fw->data;
adev->gfx.mec2_fw_version =
le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec2_feature_version =
le32_to_cpu(cp_hdr->ucode_feature_version);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
info->fw = adev->gfx.pfp_fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
info->fw = adev->gfx.me_fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
info->fw = adev->gfx.ce_fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
info->fw = adev->gfx.rlc_fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
info->fw = adev->gfx.mec_fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
/* we also need to account for the MEC jump table (JT) */
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
if (amdgpu_sriov_vf(adev)) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
info->fw = adev->gfx.mec_fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
}
if (adev->gfx.mec2_fw) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
info->fw = adev->gfx.mec2_fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
out:
if (err) {
dev_err(adev->dev,
"gfx8: Failed to load firmware \"%s\"\n",
fw_name);
amdgpu_ucode_release(&adev->gfx.pfp_fw);
amdgpu_ucode_release(&adev->gfx.me_fw);
amdgpu_ucode_release(&adev->gfx.ce_fw);
amdgpu_ucode_release(&adev->gfx.rlc_fw);
amdgpu_ucode_release(&adev->gfx.mec_fw);
amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
return err;
}
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
u32 count = 0, i;
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
buffer[count++] = cpu_to_le32(0x80000000);
buffer[count++] = cpu_to_le32(0x80000000);
for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
buffer[count++] =
cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
buffer[count++] = cpu_to_le32(ext->reg_index -
PACKET3_SET_CONTEXT_REG_START);
for (i = 0; i < ext->reg_count; i++)
buffer[count++] = cpu_to_le32(ext->extent[i]);
} else {
return;
}
}
}
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
PACKET3_SET_CONTEXT_REG_START);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
buffer[count++] = cpu_to_le32(0);
}
static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
if (adev->asic_type == CHIP_CARRIZO)
return 5;
else
return 4;
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
const struct cs_section_def *cs_data;
int r;
adev->gfx.rlc.cs_data = vi_cs_data;
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
/* init clear state block */
r = amdgpu_gfx_rlc_init_csb(adev);
if (r)
return r;
}
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
r = amdgpu_gfx_rlc_init_cpt(adev);
if (r)
return r;
}
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
return 0;
}
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
{
int r;
u32 *hpd;
size_t mec_hpd_size;
bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
if (r) {
dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
return r;
}
memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
}
return 0;
}
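/*
* The shader arrays below are pre-assembled GCN instruction dwords, paired
* with the dispatch register state that follows. They are dispatched by
* gfx_v8_0_do_edc_gpr_workarounds() further down to initialize every VGPR
* and SGPR on Carrizo as part of the EDC workaround.
*/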
static const u32 vgpr_init_compute_shader[] =
{
0x7e000209, 0x7e020208,
0x7e040207, 0x7e060206,
0x7e080205, 0x7e0a0204,
0x7e0c0203, 0x7e0e0202,
0x7e100201, 0x7e120200,
0x7e140209, 0x7e160208,
0x7e180207, 0x7e1a0206,
0x7e1c0205, 0x7e1e0204,
0x7e200203, 0x7e220202,
0x7e240201, 0x7e260200,
0x7e280209, 0x7e2a0208,
0x7e2c0207, 0x7e2e0206,
0x7e300205, 0x7e320204,
0x7e340203, 0x7e360202,
0x7e380201, 0x7e3a0200,
0x7e3c0209, 0x7e3e0208,
0x7e400207, 0x7e420206,
0x7e440205, 0x7e460204,
0x7e480203, 0x7e4a0202,
0x7e4c0201, 0x7e4e0200,
0x7e500209, 0x7e520208,
0x7e540207, 0x7e560206,
0x7e580205, 0x7e5a0204,
0x7e5c0203, 0x7e5e0202,
0x7e600201, 0x7e620200,
0x7e640209, 0x7e660208,
0x7e680207, 0x7e6a0206,
0x7e6c0205, 0x7e6e0204,
0x7e700203, 0x7e720202,
0x7e740201, 0x7e760200,
0x7e780209, 0x7e7a0208,
0x7e7c0207, 0x7e7e0206,
0xbf8a0000, 0xbf810000,
};
static const u32 sgpr_init_compute_shader[] =
{
0xbe8a0100, 0xbe8c0102,
0xbe8e0104, 0xbe900106,
0xbe920108, 0xbe940100,
0xbe960102, 0xbe980104,
0xbe9a0106, 0xbe9c0108,
0xbe9e0100, 0xbea00102,
0xbea20104, 0xbea40106,
0xbea60108, 0xbea80100,
0xbeaa0102, 0xbeac0104,
0xbeae0106, 0xbeb00108,
0xbeb20100, 0xbeb40102,
0xbeb60104, 0xbeb80106,
0xbeba0108, 0xbebc0100,
0xbebe0102, 0xbec00104,
0xbec20106, 0xbec40108,
0xbec60100, 0xbec80102,
0xbee60004, 0xbee70005,
0xbeea0006, 0xbeeb0007,
0xbee80008, 0xbee90009,
0xbefc0000, 0xbf8a0000,
0xbf810000, 0x00000000,
};
static const u32 vgpr_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*4,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
mmCOMPUTE_USER_DATA_2, 0xedcedc02,
mmCOMPUTE_USER_DATA_3, 0xedcedc03,
mmCOMPUTE_USER_DATA_4, 0xedcedc04,
mmCOMPUTE_USER_DATA_5, 0xedcedc05,
mmCOMPUTE_USER_DATA_6, 0xedcedc06,
mmCOMPUTE_USER_DATA_7, 0xedcedc07,
mmCOMPUTE_USER_DATA_8, 0xedcedc08,
mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
static const u32 sgpr1_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
mmCOMPUTE_USER_DATA_2, 0xedcedc02,
mmCOMPUTE_USER_DATA_3, 0xedcedc03,
mmCOMPUTE_USER_DATA_4, 0xedcedc04,
mmCOMPUTE_USER_DATA_5, 0xedcedc05,
mmCOMPUTE_USER_DATA_6, 0xedcedc06,
mmCOMPUTE_USER_DATA_7, 0xedcedc07,
mmCOMPUTE_USER_DATA_8, 0xedcedc08,
mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
static const u32 sgpr2_init_regs[] =
{
mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
mmCOMPUTE_NUM_THREAD_X, 256*5,
mmCOMPUTE_NUM_THREAD_Y, 1,
mmCOMPUTE_NUM_THREAD_Z, 1,
mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
mmCOMPUTE_PGM_RSRC2, 20,
mmCOMPUTE_USER_DATA_0, 0xedcedc00,
mmCOMPUTE_USER_DATA_1, 0xedcedc01,
mmCOMPUTE_USER_DATA_2, 0xedcedc02,
mmCOMPUTE_USER_DATA_3, 0xedcedc03,
mmCOMPUTE_USER_DATA_4, 0xedcedc04,
mmCOMPUTE_USER_DATA_5, 0xedcedc05,
mmCOMPUTE_USER_DATA_6, 0xedcedc06,
mmCOMPUTE_USER_DATA_7, 0xedcedc07,
mmCOMPUTE_USER_DATA_8, 0xedcedc08,
mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
static const u32 sec_ded_counter_registers[] =
{
mmCPC_EDC_ATC_CNT,
mmCPC_EDC_SCRATCH_CNT,
mmCPC_EDC_UCODE_CNT,
mmCPF_EDC_ATC_CNT,
mmCPF_EDC_ROQ_CNT,
mmCPF_EDC_TAG_CNT,
mmCPG_EDC_ATC_CNT,
mmCPG_EDC_DMA_CNT,
mmCPG_EDC_TAG_CNT,
mmDC_EDC_CSINVOC_CNT,
mmDC_EDC_RESTORE_CNT,
mmDC_EDC_STATE_CNT,
mmGDS_EDC_CNT,
mmGDS_EDC_GRBM_CNT,
mmGDS_EDC_OA_DED,
mmSPI_EDC_CNT,
mmSQC_ATC_EDC_GATCL1_CNT,
mmSQC_EDC_CNT,
mmSQ_EDC_DED_CNT,
mmSQ_EDC_INFO,
mmSQ_EDC_SEC_CNT,
mmTCC_EDC_CNT,
mmTCP_ATC_EDC_GATCL1_CNT,
mmTCP_EDC_CNT,
mmTD_EDC_CNT
};
static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
int r, i;
u32 tmp;
unsigned total_size, vgpr_offset, sgpr_offset;
u64 gpu_addr;
/* only supported on CZ */
if (adev->asic_type != CHIP_CARRIZO)
return 0;
/* bail if the compute ring is not ready */
if (!ring->sched.ready)
return 0;
tmp = RREG32(mmGB_EDC_MODE);
WREG32(mmGB_EDC_MODE, 0);
total_size =
(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
total_size +=
(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
total_size +=
(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
sgpr_offset = total_size;
total_size += sizeof(sgpr_init_compute_shader);
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, total_size,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
}
/* load the compute shaders */
for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
/* init the ib length to 0 */
ib.length_dw = 0;
/* VGPR */
/* write the register state for the compute dispatch */
for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
ib.ptr[ib.length_dw++] = 8; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* SGPR1 */
/* write the register state for the compute dispatch */
for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
ib.ptr[ib.length_dw++] = 8; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* SGPR2 */
/* write the register state for the compute dispatch */
for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
ib.ptr[ib.length_dw++] = 8; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* schedule the ib on the ring */
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail;
}
/* wait for the GPU to finish processing the IB */
r = dma_fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto fail;
}
tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
WREG32(mmGB_EDC_MODE, tmp);
tmp = RREG32(mmCC_GC_EDC_CONFIG);
tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
WREG32(mmCC_GC_EDC_CONFIG, tmp);
/* read back registers to clear the counters */
for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
RREG32(sec_ded_counter_registers[i]);
fail:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
return r;
}
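/*
 * Fill in the per-ASIC gfx configuration limits and derive the
 * GB_ADDR_CONFIG row size from the memory controller settings.
 */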
static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
u32 mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp;
int ret;
switch (adev->asic_type) {
case CHIP_TOPAZ:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_cu_per_sh = 6;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 2;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_FIJI:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 16;
adev->gfx.config.max_cu_per_sh = 16;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 4;
adev->gfx.config.max_texture_channel_caches = 16;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TONGA:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 8;
adev->gfx.config.max_cu_per_sh = 8;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 8;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CARRIZO:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_cu_per_sh = 8;
adev->gfx.config.max_texture_channel_caches = 2;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_STONEY:
adev->gfx.config.max_shader_engines = 1;
adev->gfx.config.max_tile_pipes = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 1;
adev->gfx.config.max_cu_per_sh = 3;
adev->gfx.config.max_texture_channel_caches = 2;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 16;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
break;
default:
adev->gfx.config.max_shader_engines = 2;
adev->gfx.config.max_tile_pipes = 4;
adev->gfx.config.max_cu_per_sh = 2;
adev->gfx.config.max_sh_per_se = 1;
adev->gfx.config.max_backends_per_se = 2;
adev->gfx.config.max_texture_channel_caches = 4;
adev->gfx.config.max_gprs = 256;
adev->gfx.config.max_gs_threads = 32;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
}
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
MC_ARB_RAMCFG, NOOFBANK);
adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
MC_ARB_RAMCFG, NOOFRANKS);
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
/* Get memory bank mapping mode. */
tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);
/* Validate settings in case only one DIMM is installed. */
if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
dimm00_addr_map = 0;
if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
dimm01_addr_map = 0;
if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
dimm10_addr_map = 0;
if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
dimm11_addr_map = 0;
/* If the DIMM address map is 8GB, the row size should be 2KB, otherwise 1KB. */
/* If ROW size(DIMM1) != ROW size(DIMM0), use the larger row size. */
if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
adev->gfx.config.mem_row_size_in_kb = 2;
else
adev->gfx.config.mem_row_size_in_kb = 1;
} else {
tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (adev->gfx.config.mem_row_size_in_kb > 4)
adev->gfx.config.mem_row_size_in_kb = 4;
}
adev->gfx.config.shader_engine_tile_size = 32;
adev->gfx.config.num_gpus = 1;
adev->gfx.config.multi_gpu_tile_size = 64;
/* fix up row size */
switch (adev->gfx.config.mem_row_size_in_kb) {
case 1:
default:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
break;
case 2:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
break;
case 4:
gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
break;
}
adev->gfx.config.gb_addr_config = gb_addr_config;
return 0;
}
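/* Set up a single compute ring: doorbell, EOP buffer and EOP interrupt source. */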
static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
unsigned int hw_prio;
/* mec0 is me1 */
ring->me = mec + 1;
ring->pipe = pipe;
ring->queue = queue;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX8_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
hw_prio, NULL);
if (r)
return r;
return 0;
}
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
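/*
 * Register the gfx interrupt sources, load the microcode, set up the
 * RLC, MEC and KIQ objects and create the gfx and compute rings.
 */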
static int gfx_v8_0_sw_init(void *handle)
{
int i, j, k, r, ring_id;
struct amdgpu_ring *ring;
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
case CHIP_TONGA:
case CHIP_CARRIZO:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
adev->gfx.mec.num_mec = 2;
break;
case CHIP_TOPAZ:
case CHIP_STONEY:
default:
adev->gfx.mec.num_mec = 1;
break;
}
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
/* EOP Event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
/* Add CP EDC/ECC irq */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;
/* SQ interrupts. */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
&adev->gfx.sq_irq);
if (r) {
DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
return r;
}
INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
r = gfx_v8_0_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load gfx firmware!\n");
return r;
}
r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
}
r = gfx_v8_0_mec_init(adev);
if (r) {
DRM_ERROR("Failed to init MEC BOs!\n");
return r;
}
/* set up the gfx ring */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
/* no gfx doorbells on iceland */
if (adev->asic_type != CHIP_TOPAZ) {
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0;
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
/* set up the compute queues - allocate horizontally across pipes */
ring_id = 0;
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
k, j))
continue;
r = gfx_v8_0_compute_ring_init(adev,
ring_id,
i, k, j);
if (r)
return r;
ring_id++;
}
}
}
r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
kiq = &adev->gfx.kiq[0];
r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0);
if (r)
return r;
adev->gfx.ce_ram_size = 0x8000;
r = gfx_v8_0_gpu_early_init(adev);
if (r)
return r;
return 0;
}
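/* Free the rings, MQDs, KIQ, MEC, RLC objects and microcode allocated in sw_init. */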
static int gfx_v8_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev, 0);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev, 0);
gfx_v8_0_mec_fini(adev);
amdgpu_gfx_rlc_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
gfx_v8_0_free_microcode(adev);
return 0;
}
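/*
 * Program the GB_TILE_MODE* and GB_MACROTILE_MODE* registers with the
 * per-ASIC tiling tables and mirror the values in adev->gfx.config.
 */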
static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
uint32_t *modearray, *mod2array;
const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
u32 reg_offset;
modearray = adev->gfx.config.tile_mode_array;
mod2array = adev->gfx.config.macrotile_mode_array;
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
modearray[reg_offset] = 0;
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
mod2array[reg_offset] = 0;
switch (adev->asic_type) {
case CHIP_TOPAZ:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P2));
modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
reg_offset != 23)
WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
if (reg_offset != 7)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
case CHIP_FIJI:
case CHIP_VEGAM:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_4_BANK));
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
if (reg_offset != 7)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
case CHIP_TONGA:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_4_BANK));
mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_4_BANK));
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
if (reg_offset != 7)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P4_16x16));
modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_4_BANK));
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
if (reg_offset != 7)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
case CHIP_POLARIS10:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_4_BANK));
mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
NUM_BANKS(ADDR_SURF_4_BANK));
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
if (reg_offset != 7)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
case CHIP_STONEY:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P2));
modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
reg_offset != 23)
WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
if (reg_offset != 7)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
default:
dev_warn(adev->dev,
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
adev->asic_type);
fallthrough;
case CHIP_CARRIZO:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
PIPE_CONFIG(ADDR_SURF_P2));
modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
reg_offset != 23)
WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
if (reg_offset != 7)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
}
}
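/* Program GRBM_GFX_INDEX to select a specific shader engine (SE), shader
 * array (SH) and instance, or broadcast to all of them when 0xffffffff is
 * passed; callers are expected to hold adev->grbm_idx_mutex.
 */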
static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
u32 se_num, u32 sh_num, u32 instance,
int xcc_id)
{
u32 data;
if (instance == 0xffffffff)
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
if (sh_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
WREG32(mmGRBM_GFX_INDEX, data);
}
static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
vi_srbm_select(adev, me, pipe, q, vm);
}
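/* Return a bitmap of the render backends (RBs) that are active (i.e. not
 * harvested) for the currently selected shader engine/array.
 */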
static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
data = RREG32(mmCC_RB_BACKEND_DISABLE) |
RREG32(mmGC_USER_RB_BACKEND_DISABLE);
data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se);
return (~data) & mask;
}
static void
gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_VEGAM:
*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
RB_XSEL2(1) | PKR_MAP(2) |
PKR_XSEL(1) | PKR_YSEL(1) |
SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
SE_PAIR_YSEL(2);
break;
case CHIP_TONGA:
case CHIP_POLARIS10:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
SE_PAIR_YSEL(2);
break;
case CHIP_TOPAZ:
case CHIP_CARRIZO:
*rconf |= RB_MAP_PKR0(2);
*rconf1 |= 0x0;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= 0x0;
break;
case CHIP_STONEY:
*rconf |= 0x0;
*rconf1 |= 0x0;
break;
default:
DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
break;
}
}
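/* Rewrite the per-SE raster config when some RBs are harvested, remapping
 * SE/packer/RB assignments so rasterization only targets active backends.
 */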
static void
gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
u32 raster_config, u32 raster_config_1,
unsigned rb_mask, unsigned num_rb)
{
unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
unsigned rb_per_se = num_rb / num_se;
unsigned se_mask[4];
unsigned se;
se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
(!se_mask[2] && !se_mask[3]))) {
raster_config_1 &= ~SE_PAIR_MAP_MASK;
if (!se_mask[0] && !se_mask[1]) {
raster_config_1 |=
SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
} else {
raster_config_1 |=
SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
}
}
for (se = 0; se < num_se; se++) {
unsigned raster_config_se = raster_config;
unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
int idx = (se / 2) * 2;
if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
raster_config_se &= ~SE_MAP_MASK;
if (!se_mask[idx]) {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
} else {
raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
}
}
pkr0_mask &= rb_mask;
pkr1_mask &= rb_mask;
if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
raster_config_se &= ~PKR_MAP_MASK;
if (!pkr0_mask) {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
} else {
raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
}
}
if (rb_per_se >= 2) {
unsigned rb0_mask = 1 << (se * rb_per_se);
unsigned rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR0_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
}
}
if (rb_per_se > 2) {
rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
rb1_mask = rb0_mask << 1;
rb0_mask &= rb_mask;
rb1_mask &= rb_mask;
if (!rb0_mask || !rb1_mask) {
raster_config_se &= ~RB_MAP_PKR1_MASK;
if (!rb0_mask) {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
} else {
raster_config_se |=
RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
}
}
}
}
/* GRBM_GFX_INDEX has a different offset on VI */
gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
}
/* GRBM_GFX_INDEX has a different offset on VI */
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
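/* Collect the active RB bitmap across all SEs/SHs, program the (possibly
 * harvested) raster configuration and cache the per-SE register values for
 * userspace queries.
 */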
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v8_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines, 16);
gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
if (!adev->gfx.config.backend_enable_mask ||
adev->gfx.config.num_rbs >= num_rb_pipes) {
WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
} else {
gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
RREG32(mmGC_USER_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].raster_config =
RREG32(mmPA_SC_RASTER_CONFIG);
adev->gfx.config.rb_config[i][j].raster_config_1 =
RREG32(mmPA_SC_RASTER_CONFIG_1);
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
#define DEFAULT_SH_MEM_BASES (0x6000)
/**
* gfx_v8_0_init_compute_vmid - initialize SH_MEM registers for compute VMIDs
*
* @adev: amdgpu_device pointer
*
* Initialize compute vmid sh_mem registers
*
*/
static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
uint32_t sh_mem_config;
uint32_t sh_mem_bases;
/*
* Configure apertures:
* LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
* Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
* GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
*/
sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
SH_MEM_CONFIG__PRIVATE_ATC_MASK;
mutex_lock(&adev->srbm_mutex);
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
* access. These should be enabled by FW for target VMIDs.
*/
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
WREG32(amdgpu_gds_reg_offset[i].gws, 0);
WREG32(amdgpu_gds_reg_offset[i].oa, 0);
}
}
static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
{
int vmid;
/*
* Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
* access. Compute VMIDs should be enabled by FW for target VMIDs,
* the driver can enable them for graphics. VMID0 should maintain
* access so that HWS firmware can save/restore entries.
*/
for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
}
}
static void gfx_v8_0_config_init(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
default:
adev->gfx.config.double_offchip_lds_buf = 1;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->gfx.config.double_offchip_lds_buf = 0;
break;
}
}
static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp, sh_static_mem_cfg;
int i;
WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
gfx_v8_0_tiling_mode_table_init(adev);
gfx_v8_0_setup_rb(adev);
gfx_v8_0_get_cu_info(adev);
gfx_v8_0_config_init(adev);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
SWIZZLE_ENABLE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32(mmSH_MEM_CONFIG, tmp);
WREG32(mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
WREG32(mmSH_MEM_CONFIG, tmp);
tmp = adev->gmc.shared_aperture_start >> 48;
WREG32(mmSH_MEM_BASES, tmp);
}
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
gfx_v8_0_init_compute_vmid(adev);
gfx_v8_0_init_gds_vmid(adev);
mutex_lock(&adev->grbm_idx_mutex);
/*
* make sure that the following register writes will be broadcast
* to all the shaders
*/
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_prim_fifo_size_backend <<
PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_hiz_tile_fifo_size <<
PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
(adev->gfx.config.sc_earlyz_tile_fifo_size <<
PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
tmp = RREG32(mmSPI_ARB_PRIORITY);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
WREG32(mmSPI_ARB_PRIORITY, tmp);
mutex_unlock(&adev->grbm_idx_mutex);
}
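/* Wait for the per-CU RLC serdes masters on every SE/SH and then the
 * non-CU masters to report idle, giving up after adev->usec_timeout.
 */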
static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
u32 i, j, k;
u32 mask;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
udelay(1);
}
if (k == adev->usec_timeout) {
gfx_v8_0_select_se_sh(adev, 0xffffffff,
0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
return;
}
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
for (k = 0; k < adev->usec_timeout; k++) {
if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
break;
udelay(1);
}
}
static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32(mmCP_INT_CNTL_RING0, tmp);
}
static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
{
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32(mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32(mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32(mmRLC_CSIB_LENGTH,
adev->gfx.rlc.clear_state_size);
}
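/* Walk the RLC register-list-format blob: record where each indirect entry
 * starts and replace raw register indices with their position in the
 * unique_indices table built up along the way.
 */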
static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
int ind_offset,
int list_size,
int *unique_indices,
int *indices_count,
int max_indices,
int *ind_start_offsets,
int *offset_count,
int max_offset)
{
int indices;
bool new_entry = true;
for (; ind_offset < list_size; ind_offset++) {
if (new_entry) {
new_entry = false;
ind_start_offsets[*offset_count] = ind_offset;
*offset_count = *offset_count + 1;
BUG_ON(*offset_count >= max_offset);
}
if (register_list_format[ind_offset] == 0xFFFFFFFF) {
new_entry = true;
continue;
}
ind_offset += 2;
/* look for the matching index */
for (indices = 0;
indices < *indices_count;
indices++) {
if (unique_indices[indices] ==
register_list_format[ind_offset])
break;
}
if (indices >= *indices_count) {
unique_indices[*indices_count] =
register_list_format[ind_offset];
indices = *indices_count;
*indices_count = *indices_count + 1;
BUG_ON(*indices_count >= max_indices);
}
register_list_format[ind_offset] = indices;
}
}
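/* Upload the RLC save/restore lists: the direct restore list goes into
 * SRM ARAM, the parsed indirect list and starting offsets go into GPM
 * scratch, and the unique indices are written to the SRM index control
 * registers.
 */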
static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
{
int i, temp, data;
int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
int indices_count = 0;
int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int offset_count = 0;
int list_size;
unsigned int *register_list_format =
kmemdup(adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
if (!register_list_format)
return -ENOMEM;
gfx_v8_0_parse_ind_reg_list(register_list_format,
RLC_FormatDirectRegListLength,
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indices,
&indices_count,
ARRAY_SIZE(unique_indices),
indirect_start_offsets,
&offset_count,
ARRAY_SIZE(indirect_start_offsets));
/* save and restore list */
WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
WREG32(mmRLC_SRM_ARAM_ADDR, 0);
for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
/* indirect list */
WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
list_size = list_size >> 1;
WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
/* starting offsets */
WREG32(mmRLC_GPM_SCRATCH_ADDR,
adev->gfx.rlc.starting_offsets_start);
for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(mmRLC_GPM_SCRATCH_DATA,
indirect_start_offsets[i]);
/* unique indices */
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
if (unique_indices[i] != 0) {
WREG32(temp + i, unique_indices[i] & 0x3FFFF);
WREG32(data + i, unique_indices[i] >> 20);
}
}
kfree(register_list_format);
return 0;
}
static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);
data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
WREG32(mmRLC_PG_DELAY, data);
WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);
}
static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
}
static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
}
static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
}
static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v8_0_init_power_gating(adev);
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
} else if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
gfx_v8_0_init_power_gating(adev);
}
}
static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
gfx_v8_0_wait_for_rlc_serdes(adev);
}
static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);
/* on APUs (Carrizo), the CP interrupt is enabled only after CP init */
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
udelay(50);
}
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev)) {
gfx_v8_0_init_csb(adev);
return 0;
}
adev->gfx.rlc.funcs->stop(adev);
adev->gfx.rlc.funcs->reset(adev);
gfx_v8_0_init_pg(adev);
adev->gfx.rlc.funcs->start(adev);
return 0;
}
static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32(mmCP_ME_CNTL);
if (enable) {
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
} else {
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
}
WREG32(mmCP_ME_CNTL, tmp);
udelay(50);
}
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
{
u32 count = 0;
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
/* begin clear state */
count += 2;
/* context control state */
count += 3;
for (sect = vi_cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT)
count += 2 + ext->reg_count;
else
return 0;
}
}
/* pa_sc_raster_config/pa_sc_raster_config1 */
count += 4;
/* end clear state */
count += 2;
/* clear state */
count += 2;
return count;
}
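/* Initialize the gfx CP and emit the clear-state preamble, raster config
 * and CE partition setup on the gfx ring.
 */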
static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
int r, i;
/* init the CP */
WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
WREG32(mmCP_ENDIAN_SWAP, 0);
WREG32(mmCP_DEVICE_ID, 1);
gfx_v8_0_cp_gfx_enable(adev, true);
r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
}
/* clear state buffer */
amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, 0x80000000);
amdgpu_ring_write(ring, 0x80000000);
for (sect = vi_cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
amdgpu_ring_write(ring,
PACKET3(PACKET3_SET_CONTEXT_REG,
ext->reg_count));
amdgpu_ring_write(ring,
ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
for (i = 0; i < ext->reg_count; i++)
amdgpu_ring_write(ring, ext->extent[i]);
}
}
}
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
amdgpu_ring_write(ring, 0);
/* init the CE partitions */
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_commit(ring);
return 0;
}
static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
u32 tmp;
/* no gfx doorbells on iceland */
if (adev->asic_type == CHIP_TOPAZ)
return;
tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);
if (ring->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_EN, 1);
} else {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
}
WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);
if (adev->flags & AMD_IS_APU)
return;
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER,
adev->doorbell_index.gfx_ring0);
WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}
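/* Program the gfx ring buffer (CP_RB0): size, rptr/wptr addresses, doorbell
 * and base address, then kick off the ring via gfx_v8_0_cp_gfx_start().
 */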
static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
/* Set the write pointer delay */
WREG32(mmCP_RB_WPTR_DELAY, 0);
/* set the RB to use vmid 0 */
WREG32(mmCP_RB_VMID, 0);
/* Set ring buffer size */
ring = &adev->gfx.gfx_ring[0];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
WREG32(mmCP_RB0_CNTL, tmp);
/* Initialize the ring buffer's read and write pointers */
WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
/* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
wptr_gpu_addr = ring->wptr_gpu_addr;
WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32(mmCP_RB0_CNTL, tmp);
rb_addr = ring->gpu_addr >> 8;
WREG32(mmCP_RB0_BASE, rb_addr);
WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
gfx_v8_0_set_cpg_door_bell(adev, ring);
/* start the ring */
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
return 0;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
if (enable) {
WREG32(mmCP_MEC_CNTL, 0);
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
/* KIQ functions */
static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
{
uint32_t tmp;
struct amdgpu_device *adev = ring->adev;
/* tell RLC which queue is the KIQ */
tmp = RREG32(mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
WREG32(mmRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80;
WREG32(mmRLC_CP_SCHEDULERS, tmp);
}
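/* Use the KIQ to bring up all compute queues: one SET_RESOURCES packet with
 * the queue mask followed by a MAP_QUEUES packet per compute ring.
 */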
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
uint64_t queue_mask = 0;
int r, i;
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
continue;
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
DRM_ERROR("Invalid KCQ enabled: %d\n", i);
break;
}
queue_mask |= (1ull << i);
}
r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
return r;
}
/* set resources */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */
amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
uint64_t wptr_addr = ring->wptr_gpu_addr;
/* map queues */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
amdgpu_ring_commit(kiq_ring);
return 0;
}
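/* Request dequeue of the currently selected HQD and wait for it to go
 * inactive; returns -ETIMEDOUT if the queue does not drain in time.
 */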
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
{
int i, r = 0;
if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
if (i == adev->usec_timeout)
r = -ETIMEDOUT;
}
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
WREG32(mmCP_HQD_PQ_RPTR, 0);
WREG32(mmCP_HQD_PQ_WPTR, 0);
return r;
}
static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
{
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
}
}
}
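/* Fill the memory queue descriptor (MQD) for a compute/KIQ ring: EOP buffer,
 * MQD/ring base addresses, doorbell control and MTYPE settings used by the
 * HQD.
 */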
static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
mqd->header = 0xC0310800;
mqd->compute_pipelinestat_enable = 0x00000001;
mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32(mmCP_HQD_EOP_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN,
ring->use_doorbell ? 1 : 0);
mqd->cp_hqd_pq_doorbell_control = tmp;
/* set the pointer to the MQD */
mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
/* set MQD vmid to 0 */
tmp = RREG32(mmCP_MQD_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
hqd_gpu_addr = ring->gpu_addr >> 8;
mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
tmp = RREG32(mmCP_HQD_PQ_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
wb_gpu_addr = ring->rptr_gpu_addr;
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
upper_32_bits(wb_gpu_addr) & 0xffff;
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
wb_gpu_addr = ring->wptr_gpu_addr;
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
tmp = 0;
/* enable the doorbell if requested */
if (ring->use_doorbell) {
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
}
mqd->cp_hqd_pq_doorbell_control = tmp;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
mqd->cp_hqd_pq_wptr = ring->wptr;
mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
mqd->cp_hqd_persistent_state = tmp;
/* set MTYPE */
tmp = RREG32(mmCP_HQD_IB_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
mqd->cp_hqd_ib_control = tmp;
tmp = RREG32(mmCP_HQD_IQ_TIMER);
tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
mqd->cp_hqd_iq_timer = tmp;
tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
mqd->cp_hqd_ctx_save_control = tmp;
/* defaults */
mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
/* set static priority for a queue/ring */
gfx_v8_0_mqd_set_priority(ring, mqd);
mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
/* the map_queues packet doesn't need to activate the queue,
* so only the KIQ needs to set this field.
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
mqd->cp_hqd_active = 1;
return 0;
}
static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
struct vi_mqd *mqd)
{
uint32_t mqd_reg;
uint32_t *mqd_data;
/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
mqd_data = &mqd->cp_mqd_base_addr_lo;
/* disable wptr polling */
WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
/* program all HQD registers */
for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
* This is safe since EOP RPTR==WPTR for any inactive HQD
* on ASICs that do not support context-save.
* EOP writes/reads can start anywhere in the ring.
*/
if (adev->asic_type != CHIP_TONGA) {
WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
}
for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
/* activate the HQD */
for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);
return 0;
}
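/* Initialize the KIQ queue, or restore it from the MQD backup after a GPU
 * reset; MQD programming is done under the SRBM mutex.
 */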
static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
gfx_v8_0_kiq_setting(ring);
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.kiq[0].mqd_backup)
memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_commit(adev, mqd);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
if (amdgpu_sriov_vf(adev) && adev->in_suspend)
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
gfx_v8_0_mqd_commit(adev, mqd);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.kiq[0].mqd_backup)
memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
}
return 0;
}
static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
} else {
/* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
}
return 0;
}
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
if (adev->asic_type > CHIP_TONGA) {
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
}
/* enable doorbells */
WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
}
static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
int r;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(ring->mqd_obj);
return r;
}
gfx_v8_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
return 0;
}
static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int r = 0, i;
gfx_v8_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
if (!r) {
r = gfx_v8_0_kcq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
amdgpu_bo_unreserve(ring->mqd_obj);
if (r)
goto done;
}
gfx_v8_0_set_mec_doorbell_range(adev);
r = gfx_v8_0_kiq_kcq_enable(adev);
if (r)
goto done;
done:
return r;
}
static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
{
int r, i;
struct amdgpu_ring *ring;
/* collect all the ring_tests here: gfx, kiq, compute */
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
amdgpu_ring_test_helper(ring);
}
return 0;
}
static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
{
int r;
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
r = gfx_v8_0_kiq_resume(adev);
if (r)
return r;
r = gfx_v8_0_cp_gfx_resume(adev);
if (r)
return r;
r = gfx_v8_0_kcq_resume(adev);
if (r)
return r;
r = gfx_v8_0_cp_test_all_rings(adev);
if (r)
return r;
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
return 0;
}
static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
gfx_v8_0_cp_gfx_enable(adev, enable);
gfx_v8_0_cp_compute_enable(adev, enable);
}
static int gfx_v8_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
r = gfx_v8_0_cp_resume(adev);
return r;
}
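/* Unmap all compute queues through the KIQ using UNMAP_QUEUES packets with
 * the RESET_QUEUES action.
 */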
static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
{
int r, i;
struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
if (r)
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
return r;
}
static bool gfx_v8_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
|| RREG32(mmGRBM_STATUS2) != 0x8)
return false;
else
return true;
}
static bool gfx_v8_0_rlc_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (RREG32(mmGRBM_STATUS2) != 0x8)
return false;
else
return true;
}
static int gfx_v8_0_wait_for_rlc_idle(void *handle)
{
unsigned int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
if (gfx_v8_0_rlc_is_idle(handle))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int gfx_v8_0_wait_for_idle(void *handle)
{
unsigned int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
if (gfx_v8_0_is_idle(handle))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
/* disable KCQ to avoid CPC touch memory not valid anymore */
gfx_v8_0_kcq_disable(adev);
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (!gfx_v8_0_wait_for_idle(adev))
gfx_v8_0_cp_enable(adev, false);
else
pr_err("cp is busy, skip halt cp\n");
if (!gfx_v8_0_wait_for_rlc_idle(adev))
adev->gfx.rlc.funcs->stop(adev);
else
pr_err("rlc is busy, skip halt rlc\n");
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
static int gfx_v8_0_suspend(void *handle)
{
return gfx_v8_0_hw_fini(handle);
}
static int gfx_v8_0_resume(void *handle)
{
return gfx_v8_0_hw_init(handle);
}
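/**
 * gfx_v8_0_check_soft_reset - check whether the GFX block needs a soft reset
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Inspects GRBM_STATUS, GRBM_STATUS2 and SRBM_STATUS and records the
 * required GRBM/SRBM soft-reset masks in adev->gfx.
 * Returns true if any engine is busy/hung and a soft reset is required.
 */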
static bool gfx_v8_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
/* GRBM_STATUS */
tmp = RREG32(mmGRBM_STATUS);
if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
}
/* GRBM_STATUS2 */
tmp = RREG32(mmGRBM_STATUS2);
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPF, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPC, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPG, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
SOFT_RESET_GRBM, 1);
}
/* SRBM_STATUS */
tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
adev->gfx.grbm_soft_reset = grbm_soft_reset;
adev->gfx.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->gfx.grbm_soft_reset = 0;
adev->gfx.srbm_soft_reset = 0;
return false;
}
}
static int gfx_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
/* Disable GFX parsing/prefetching */
gfx_v8_0_cp_gfx_enable(adev, false);
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_deactivate_hqd(adev, 2);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
/* Disable MEC parsing/prefetching */
gfx_v8_0_cp_compute_enable(adev, false);
}
return 0;
}
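/**
 * gfx_v8_0_soft_reset - perform the GFX soft reset decided earlier
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Stalls the GMCON fabric, pulses the GRBM/SRBM soft-reset bits collected
 * by gfx_v8_0_check_soft_reset() and releases the stall again.
 */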
static int gfx_v8_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
srbm_soft_reset = adev->gfx.srbm_soft_reset;
if (grbm_soft_reset || srbm_soft_reset) {
tmp = RREG32(mmGMCON_DEBUG);
tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
WREG32(mmGMCON_DEBUG, tmp);
udelay(50);
}
if (grbm_soft_reset) {
tmp = RREG32(mmGRBM_SOFT_RESET);
tmp |= grbm_soft_reset;
dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmGRBM_SOFT_RESET, tmp);
tmp = RREG32(mmGRBM_SOFT_RESET);
udelay(50);
tmp &= ~grbm_soft_reset;
WREG32(mmGRBM_SOFT_RESET, tmp);
tmp = RREG32(mmGRBM_SOFT_RESET);
}
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
}
if (grbm_soft_reset || srbm_soft_reset) {
tmp = RREG32(mmGMCON_DEBUG);
tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
WREG32(mmGMCON_DEBUG, tmp);
}
/* Wait a little for things to settle down */
udelay(50);
return 0;
}
static int gfx_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_deactivate_hqd(adev, 2);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
gfx_v8_0_kiq_resume(adev);
gfx_v8_0_kcq_resume(adev);
}
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
gfx_v8_0_cp_test_all_rings(adev);
adev->gfx.rlc.funcs->start(adev);
return 0;
}
/**
* gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
*
* @adev: amdgpu_device pointer
*
* Fetches a GPU clock counter snapshot.
* Returns the 64 bit clock counter snapshot.
*/
static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
mutex_lock(&adev->gfx.gpu_clock_mutex);
WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
return clock;
}
static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
/* GDS Base */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, gds_base);
/* GDS Size */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, gds_size);
/* GWS */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
/* OA */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
WREG32(mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK));
return RREG32(mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
WREG32(mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK) |
(SQ_IND_INDEX__AUTO_INCR_MASK));
while (num--)
*(out++) = RREG32(mmSQ_IND_DATA);
}
static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* type 0 wave data */
dst[(*no_fields)++] = 0;
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
wave_read_regs(
adev, simd, wave, 0,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v8_0_select_se_sh,
.read_wave_data = &gfx_v8_0_read_wave_data,
.read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
.select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
};
static int gfx_v8_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.xcc_mask = 1;
adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
gfx_v8_0_set_gds_init(adev);
gfx_v8_0_set_rlc_funcs(adev);
return 0;
}
static int gfx_v8_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
if (r)
return r;
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v8_0_do_edc_gpr_workarounds(adev);
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r) {
DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
return r;
}
r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
if (r) {
DRM_ERROR(
"amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
r);
return r;
}
return 0;
}
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12) ||
(adev->asic_type == CHIP_VEGAM))
/* Send msg to SMU via Powerplay */
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
}
static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}
static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);
/* Read any GFX register to wake up GFX. */
if (!enable)
RREG32(mmDB_RENDER_CONTROL);
}
static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
cz_enable_gfx_cg_power_gating(adev, true);
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
cz_enable_gfx_pipeline_power_gating(adev, true);
} else {
cz_enable_gfx_cg_power_gating(adev, false);
cz_enable_gfx_pipeline_power_gating(adev, false);
}
}
static int gfx_v8_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
return 0;
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
cz_enable_sck_slow_down_on_power_up(adev, true);
cz_enable_sck_slow_down_on_power_down(adev, true);
} else {
cz_enable_sck_slow_down_on_power_up(adev, false);
cz_enable_sck_slow_down_on_power_down(adev, false);
}
if (adev->pg_flags & AMD_PG_SUPPORT_CP)
cz_enable_cp_power_gating(adev, true);
else
cz_enable_cp_power_gating(adev, false);
cz_update_gfx_cg_power_gating(adev, enable);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
else
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
else
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
polaris11_enable_gfx_quick_mg_power_gating(adev, true);
else
polaris11_enable_gfx_quick_mg_power_gating(adev, false);
break;
default:
break;
}
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
/* AMD_CG_SUPPORT_GFX_MGCG */
data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
data = RREG32(mmRLC_CGCG_CGLS_CTRL);
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
/* AMD_CG_SUPPORT_GFX_CGLS */
if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_CGTS */
data = RREG32(mmCGTS_SM_CTRL_REG);
if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_CGTS;
/* AMD_CG_SUPPORT_GFX_CGTS_LS */
if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
data = RREG32(mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
data = RREG32(mmCP_MEM_SLP_CNTL);
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}
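/**
 * gfx_v8_0_send_serdes_cmd - broadcast a BPM command over the RLC serdes
 *
 * @adev: amdgpu_device pointer
 * @reg_addr: BPM register to address
 * @cmd: BPM data/command to write
 *
 * Selects all SEs/SHs and issues the command to every CU and non-CU master
 * through RLC_SERDES_WR_CTRL; used to set or clear the MGCG/CGCG/CGLS
 * overrides.
 */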
static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
uint32_t reg_addr, uint32_t cmd)
{
uint32_t data;
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RREG32(mmRLC_SERDES_WR_CTRL);
if (adev->asic_type == CHIP_STONEY)
data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
RLC_SERDES_WR_CTRL__POWER_UP_MASK |
RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
else
data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
RLC_SERDES_WR_CTRL__POWER_UP_MASK |
RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
(cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
(reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
(0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
WREG32(mmRLC_SERDES_WR_CTRL, data);
}
#define MSG_ENTER_RLC_SAFE_MODE 1
#define MSG_EXIT_RLC_SAFE_MODE 0
#define RLC_GPR_REG2__REQ_MASK 0x00000001
#define RLC_GPR_REG2__REQ__SHIFT 0
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
{
uint32_t rlc_setting;
rlc_setting = RREG32(mmRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
return false;
return true;
}
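/**
 * gfx_v8_0_set_safe_mode - request RLC safe mode
 *
 * @adev: amdgpu_device pointer
 * @xcc_id: XCC instance (unused, single instance on VI)
 *
 * Writes the enter-safe-mode message to RLC_SAFE_MODE, polls RLC_GPM_STAT
 * until the GFX clock/power status shows the request was honoured, then
 * waits for the CMD bit to auto-clear.
 */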
static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
data = RREG32(mmRLC_CNTL);
data |= RLC_SAFE_MODE__CMD_MASK;
data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
WREG32(mmRLC_SAFE_MODE, data);
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
if ((RREG32(mmRLC_GPM_STAT) &
(RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
(RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
break;
udelay(1);
}
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
}
static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
data = RREG32(mmRLC_CNTL);
data |= RLC_SAFE_MODE__CMD_MASK;
data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
WREG32(mmRLC_SAFE_MODE, data);
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
}
static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 data;
amdgpu_gfx_off_ctrl(adev, false);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
else
data = RREG32(mmRLC_SPM_VMID);
data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
if (amdgpu_sriov_is_pp_one_vf(adev))
WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
else
WREG32(mmRLC_SPM_VMID, data);
amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
.set_safe_mode = gfx_v8_0_set_safe_mode,
.unset_safe_mode = gfx_v8_0_unset_safe_mode,
.init = gfx_v8_0_rlc_init,
.get_csb_size = gfx_v8_0_get_csb_size,
.get_csb_buffer = gfx_v8_0_get_csb_buffer,
.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
.resume = gfx_v8_0_rlc_resume,
.stop = gfx_v8_0_rlc_stop,
.reset = gfx_v8_0_rlc_reset,
.start = gfx_v8_0_rlc_start,
.update_spm_vmid = gfx_v8_0_update_spm_vmid
};
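/**
 * gfx_v8_0_update_medium_grain_clock_gating - toggle MGCG/MGLS/CGTS
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable medium grain clock gating
 *
 * Under RLC safe mode, programs RLC/CP memory light sleep, the MGCG
 * override bits and the CGTS (tree shade) controls, waiting for the RLC
 * serdes masters to go idle between steps.
 */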
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
/* 1 - RLC memory Light sleep */
WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
}
/* 3 - RLC_CGTT_MGCG_OVERRIDE */
temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
if (adev->flags & AMD_IS_APU)
data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
else
data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
if (temp != data)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 5 - clear mgcg override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
temp = data = RREG32(mmCGTS_SM_CTRL_REG);
data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
(adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
if (temp != data)
WREG32(mmCGTS_SM_CTRL_REG, data);
}
udelay(50);
/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
} else {
/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
if (temp != data)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
/* 2 - disable MGLS in RLC */
data = RREG32(mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
WREG32(mmRLC_MEM_SLP_CNTL, data);
}
/* 3 - disable MGLS in CP */
data = RREG32(mmCP_MEM_SLP_CNTL);
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
WREG32(mmCP_MEM_SLP_CNTL, data);
}
/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
temp = data = RREG32(mmCGTS_SM_CTRL_REG);
data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
if (temp != data)
WREG32(mmCGTS_SM_CTRL_REG, data);
/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 6 - set mgcg override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
udelay(50);
/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
}
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
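/**
 * gfx_v8_0_update_coarse_grain_clock_gating - toggle CGCG/CGLS
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable coarse grain clock gating
 *
 * Under RLC safe mode, sets or clears the CGCG/CGLS override bits, issues
 * the matching serdes commands and updates RLC_CGCG_CGLS_CTRL, re-enabling
 * the GUI idle interrupts afterwards.
 */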
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, temp1, data, data1;
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
if (temp1 != data1)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
/* 1 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 2 - clear cgcg override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 3 - write cmd to set CGLS */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
/* 4 - enable cgcg */
data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
/* enable cgls*/
data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
if (temp1 != data1)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
} else {
data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
}
if (temp != data)
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
/* 5 enable cntx_empty_int_enable/cntx_busy_int_enable/
* Cmp_busy/GFX_Idle interrupts
*/
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
} else {
/* disable cntx_empty_int_enable & GFX Idle interrupt */
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
/* TEST CGCG */
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
if (temp1 != data1)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
/* read gfx register to wake up cgcg */
RREG32(mmCB_CGTT_SCLK_CTRL);
RREG32(mmCB_CGTT_SCLK_CTRL);
RREG32(mmCB_CGTT_SCLK_CTRL);
RREG32(mmCB_CGTT_SCLK_CTRL);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* write cmd to Set CGCG Override */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* write cmd to Clear CGLS */
gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
/* disable cgcg, cgls should be disabled too. */
data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
if (temp != data)
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
/* enable interrupts again for PG */
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
}
gfx_v8_0_wait_for_rlc_serdes(adev);
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
* === MGCG + MGLS + TS(CG/LS) ===
*/
gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
} else {
/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
* === CGCG + CGLS ===
*/
gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
}
return 0;
}
static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
}
static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
pp_support_state = PP_STATE_SUPPORT_LS;
pp_state = PP_STATE_LS;
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
pp_support_state |= PP_STATE_SUPPORT_CG;
pp_state |= PP_STATE_CG;
}
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
pp_support_state = PP_STATE_SUPPORT_LS;
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
else
pp_state = PP_STATE_LS;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
pp_support_state = PP_STATE_SUPPORT_LS;
if (state == AMD_CG_STATE_UNGATE)
pp_state = 0;
else
pp_state = PP_STATE_LS;
msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
}
static int gfx_v8_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
gfx_v8_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
case CHIP_TONGA:
gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
break;
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
break;
default:
break;
}
return 0;
}
static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr;
}
static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
/* XXX check if swapping is necessary on BE */
return *ring->wptr_cpu_addr;
else
return RREG32(mmCP_RB0_WPTR);
}
static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
(void)RREG32(mmCP_RB0_WPTR);
}
}
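/**
 * gfx_v8_0_ring_emit_hdp_flush - emit an HDP flush on a ring
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits a WAIT_REG_MEM packet that writes GPU_HDP_FLUSH_REQ and waits for
 * the matching bit in GPU_HDP_FLUSH_DONE, using the per-ME/pipe mask for
 * compute/KIQ rings and the CP0 mask for the gfx ring.
 */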
static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
(ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
switch (ring->me) {
case 1:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
break;
case 2:
ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
break;
default:
return;
}
reg_mem_engine = 0;
} else {
ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
}
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
WAIT_REG_MEM_FUNCTION(3) | /* == */
reg_mem_engine));
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
amdgpu_ring_write(ring, ref_and_mask);
amdgpu_ring_write(ring, ref_and_mask);
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
EVENT_INDEX(4));
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
EVENT_INDEX(0));
}
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v8_0_ring_emit_de_meta(ring);
}
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
/* Currently, there is a high possibility to get wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
* randomly when at least 5 compute pipes use GDS ordered append.
* The wave IDs generated by ME are also wrong after suspend/resume.
* Those are probably bugs somewhere else in the kernel driver.
*
* Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
* GDS to 0 for this ring (me/pipe).
*/
if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
}
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}
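/**
 * gfx_v8_0_ring_emit_fence_gfx - emit a fence on the gfx ring
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: fence sequence number
 * @flags: AMDGPU_FENCE_FLAG_* flags
 *
 * Emits two EVENT_WRITE_EOP packets: a dummy one carrying seq - 1 to work
 * around cache flush problems, then the real one that writes @seq and
 * optionally raises an interrupt.
 */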
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
*/
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(1) | INT_SEL(0));
amdgpu_ring_write(ring, lower_32_bits(seq - 1));
amdgpu_ring_write(ring, upper_32_bits(seq - 1));
/* Then send the real EOP event down the pipe:
* EVENT_WRITE_EOP - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
}
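/**
 * gfx_v8_0_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VMID to flush
 * @pd_addr: page directory base address
 *
 * Emits the GMC TLB flush, waits for VM_INVALIDATE_REQUEST to complete
 * and, on the gfx ring, synchronizes PFP with ME to avoid stale PFP reads.
 */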
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for the invalidate to complete */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
WAIT_REG_MEM_FUNCTION(0) | /* always */
WAIT_REG_MEM_ENGINE(0))); /* me */
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); /* ref */
amdgpu_ring_write(ring, 0); /* mask */
amdgpu_ring_write(ring, 0x20); /* poll interval */
/* compute doesn't have PFP */
if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
}
}
static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
return *ring->wptr_cpu_addr;
}
static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
/* XXX check if swapping is necessary on BE */
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
/* RELEASE_MEM - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned int flags)
{
/* we only allocate 32bit for each seq wb address */
BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
/* write fence seq to the "addr" */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
if (flags & AMDGPU_FENCE_FLAG_INT) {
/* set register to trigger INT */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
amdgpu_ring_write(ring, mmCPC_INT_STATUS);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
}
}
static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
if (amdgpu_sriov_vf(ring->adev))
gfx_v8_0_ring_emit_ce_meta(ring);
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
gfx_v8_0_ring_emit_vgt_flush(ring);
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
dw2 |= 0x01000000;
/* set load_per_context_state & load_gfx_sh_regs for GFX */
dw2 |= 0x10002;
/* set load_ce_ram if preamble presented */
if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
dw2 |= 0x10000000;
} else {
/* still load_ce_ram if this is the first time a preamble is presented,
 * even though no context switch happens.
 */
if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
dw2 |= 0x10000000;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, dw2);
amdgpu_ring_write(ring, 0);
}
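/**
 * gfx_v8_0_ring_emit_init_cond_exec - start a conditional execution block
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits a COND_EXEC packet with a placeholder DW count and returns the
 * ring offset of that placeholder so gfx_v8_0_ring_emit_patch_cond_exec()
 * can patch in the real length later.
 */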
static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
ret = ring->wptr & ring->buf_mask;
amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
return ret;
}
static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
unsigned cur;
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
cur = (ring->wptr & ring->buf_mask) - 1;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
(5 << 8) | /* dst: memory */
(1 << 20)); /* write confirm */
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
reg_val_offs * 4));
}
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val)
{
uint32_t cmd;
switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_GFX:
cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
break;
case AMDGPU_RING_TYPE_KIQ:
cmd = 1 << 16; /* no inc addr */
break;
default:
cmd = WR_CONFIRM;
break;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, cmd);
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val);
}
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t value = 0;
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
WREG32(mmSQ_CMD, value);
}
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
int me, int pipe,
enum amdgpu_interrupt_state state)
{
u32 mec_int_cntl, mec_int_cntl_reg;
/*
* amdgpu controls only the first MEC. That's why this function only
* handles the setting of interrupts for this specific MEC. All other
* pipes' interrupts are set by amdkfd.
*/
if (me == 1) {
switch (pipe) {
case 0:
mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
break;
case 1:
mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
break;
case 2:
mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
break;
case 3:
mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
break;
default:
DRM_DEBUG("invalid pipe %d\n", pipe);
return;
}
} else {
DRM_DEBUG("invalid me %d\n", me);
return;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
mec_int_cntl = RREG32(mec_int_cntl_reg);
mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
WREG32(mec_int_cntl_reg, mec_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
mec_int_cntl = RREG32(mec_int_cntl_reg);
mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
WREG32(mec_int_cntl_reg, mec_int_cntl);
break;
default:
break;
}
}
static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
return 0;
}
static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
switch (type) {
case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
break;
default:
break;
}
return 0;
}
static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
int enable_flag;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
enable_flag = 0;
break;
case AMDGPU_IRQ_STATE_ENABLE:
enable_flag = 1;
break;
default:
return -EINVAL;
}
WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
enable_flag);
return 0;
}
static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
enum amdgpu_interrupt_state state)
{
int enable_flag;
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
enable_flag = 1;
break;
case AMDGPU_IRQ_STATE_ENABLE:
enable_flag = 0;
break;
default:
return -EINVAL;
}
WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
enable_flag);
return 0;
}
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
int i;
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
DRM_DEBUG("IH: CP EOP\n");
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
switch (me_id) {
case 0:
amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
break;
case 1:
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
/* Per-queue interrupt is supported for MEC starting from VI.
* The interrupt can only be enabled/disabled per pipe instead of per queue.
*/
if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
amdgpu_fence_process(ring);
}
break;
}
return 0;
}
static void gfx_v8_0_fault(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
int i;
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
switch (me_id) {
case 0:
drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
break;
case 1:
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
if (ring->me == me_id && ring->pipe == pipe_id &&
ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
}
}
static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
gfx_v8_0_fault(adev, entry);
return 0;
}
static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
gfx_v8_0_fault(adev, entry);
return 0;
}
static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("CP EDC/ECC error detected.");
return 0;
}
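/**
 * gfx_v8_0_parse_sq_irq - decode and log an SQ interrupt
 *
 * @adev: amdgpu_device pointer
 * @ih_data: raw interrupt payload
 * @from_wq: true when called from the work queue, where SQ_EDC_INFO is
 *           safe to read
 *
 * Decodes the SQ interrupt encoding and prints either the general purpose
 * interrupt details or the per-wave instruction/EDC error information.
 */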
static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
bool from_wq)
{
u32 enc, se_id, sh_id, cu_id;
char type[20];
int sq_edc_source = -1;
enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
switch (enc) {
case 0:
DRM_INFO("SQ general purpose intr detected:"
"se_id %d, immed_overflow %d, host_reg_overflow %d,"
"host_cmd_overflow %d, cmd_timestamp %d,"
"reg_timestamp %d, thread_trace_buff_full %d,"
"wlt %d, thread_trace %d.\n",
se_id,
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
);
break;
case 1:
case 2:
cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
/*
* This function can be called either directly from ISR
* or from BH in which case we can access SQ_EDC_INFO
* instance
*/
if (from_wq) {
mutex_lock(&adev->grbm_idx_mutex);
gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0);
sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
if (enc == 1)
sprintf(type, "instruction intr");
else
sprintf(type, "EDC/ECC error");
DRM_INFO(
"SQ %s detected: "
"se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
"trap %s, sq_ed_info.source %s.\n",
type, se_id, sh_id, cu_id,
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
(sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
);
break;
default:
DRM_ERROR("SQ invalid encoding type\n.");
}
}
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
struct sq_work *sq_work = container_of(work, struct sq_work, work);
gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
}
static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
unsigned ih_data = entry->src_data[0];
/*
* Try to submit work so SQ_EDC_INFO can be accessed from
* BH. If previous work submission hasn't finished yet
* just print whatever info is possible directly from the ISR.
*/
if (work_pending(&adev->gfx.sq_work.work)) {
gfx_v8_0_parse_sq_irq(adev, ih_data, false);
} else {
adev->gfx.sq_work.ih_data = ih_data;
schedule_work(&adev->gfx.sq_work.work);
}
return 0;
}
static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
PACKET3_SH_KCACHE_ACTION_ENA |
PACKET3_SH_ICACHE_ACTION_ENA |
PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
PACKET3_SH_KCACHE_ACTION_ENA |
PACKET3_SH_ICACHE_ACTION_ENA |
PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT 0x0000007f
static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
uint32_t pipe, bool enable)
{
uint32_t val;
uint32_t wcl_cs_reg;
val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
switch (pipe) {
case 0:
wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
break;
case 1:
wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
break;
case 2:
wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
break;
case 3:
wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
break;
default:
DRM_DEBUG("invalid pipe %d\n", pipe);
return;
}
amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
}
#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff
static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
{
struct amdgpu_device *adev = ring->adev;
uint32_t val;
int i;
/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
 * the number of gfx waves. Setting it to 0x1f (the low 5 bits) makes sure
 * gfx only gets around 25% of the gpu resources.
 */
val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
/* Restrict waves for normal/low priority compute queues as well
* to get best QoS for high priority compute jobs.
*
* amdgpu controls only 1st ME(0-3 CS pipes).
*/
for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
if (i != ring->pipe)
gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
}
}
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init,
.sw_init = gfx_v8_0_sw_init,
.sw_fini = gfx_v8_0_sw_fini,
.hw_init = gfx_v8_0_hw_init,
.hw_fini = gfx_v8_0_hw_fini,
.suspend = gfx_v8_0_suspend,
.resume = gfx_v8_0_resume,
.is_idle = gfx_v8_0_is_idle,
.wait_for_idle = gfx_v8_0_wait_for_idle,
.check_soft_reset = gfx_v8_0_check_soft_reset,
.pre_soft_reset = gfx_v8_0_pre_soft_reset,
.soft_reset = gfx_v8_0_soft_reset,
.post_soft_reset = gfx_v8_0_post_soft_reset,
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
.get_clockgating_state = gfx_v8_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.type = AMDGPU_RING_TYPE_GFX,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = false,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.emit_frame_size = /* maximum 215dw if count 16 IBs in */
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
12 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
the first COND_EXEC jump to the place just
prior to this double SWITCH_BUFFER */
5 + /* COND_EXEC */
7 + /* HDP_flush */
4 + /* VGT_flush */
14 + /* CE_META */
31 + /* DE_META */
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
12 + 12 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
5, /* SURFACE_SYNC */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v8_ring_emit_sb,
.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
.emit_mem_sync = gfx_v8_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.type = AMDGPU_RING_TYPE_COMPUTE,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = false,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.emit_frame_size =
20 + /* gfx_v8_0_ring_emit_gds_switch */
7 + /* gfx_v8_0_ring_emit_hdp_flush */
5 + /* hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
7 + /* gfx_v8_0_emit_mem_sync_compute */
5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
.emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.type = AMDGPU_RING_TYPE_KIQ,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = false,
.get_rptr = gfx_v8_0_ring_get_rptr,
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.emit_frame_size =
20 + /* gfx_v8_0_ring_emit_gds_switch */
7 + /* gfx_v8_0_ring_emit_hdp_flush */
5 + /* hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
17 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
.test_ring = gfx_v8_0_ring_test_ring,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
.set = gfx_v8_0_set_eop_interrupt_state,
.process = gfx_v8_0_eop_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
.set = gfx_v8_0_set_priv_reg_fault_state,
.process = gfx_v8_0_priv_reg_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
.set = gfx_v8_0_set_priv_inst_fault_state,
.process = gfx_v8_0_priv_inst_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
.set = gfx_v8_0_set_cp_ecc_int_state,
.process = gfx_v8_0_cp_ecc_error_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
.set = gfx_v8_0_set_sq_int_state,
.process = gfx_v8_0_sq_irq,
};
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
adev->gfx.cp_ecc_error_irq.num_types = 1;
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
adev->gfx.sq_irq.num_types = 1;
adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
}
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
adev->gfx.rlc.funcs = &iceland_rlc_funcs;
}
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asic gds info */
adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
}
static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
u32 bitmap)
{
u32 data;
if (!bitmap)
return;
data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
unsigned disable_masks[4 * 2];
u32 ao_cu_num;
memset(cu_info, 0, sizeof(*cu_info));
if (adev->flags & AMD_IS_APU)
ao_cu_num = 2;
else
ao_cu_num = adev->gfx.config.max_cu_per_sh;
amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v8_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {
if (counter < ao_cu_num)
ao_bitmap |= mask;
counter ++;
}
mask <<= 1;
}
active_cu_number += counter;
if (i < 2 && j < 2)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
cu_info->simd_per_cu = NUM_SIMD_PER_CU;
cu_info->max_waves_per_simd = 10;
cu_info->max_scratch_slots_per_cu = 32;
cu_info->wave_front_size = 64;
cu_info->lds_size = 64;
}
const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 8,
.minor = 0,
.rev = 0,
.funcs = &gfx_v8_0_ip_funcs,
};
const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 8,
.minor = 1,
.rev = 0,
.funcs = &gfx_v8_0_ip_funcs,
};
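/* Write the CE metadata payload into its slot in the CSA through a WRITE_DATA
 * packet; the payload layout depends on whether chained IBs are supported.
 */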
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
uint64_t ce_payload_addr;
int cnt_ce;
union {
struct vi_ce_ib_state regular;
struct vi_ce_ib_state_chained_ib chained;
} ce_payload = {};
if (ring->adev->virt.chained_ib_support) {
ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
} else {
ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
offsetof(struct vi_gfx_meta_data, ce_payload);
cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
WRITE_DATA_DST_SEL(8) |
WR_CONFIRM) |
WRITE_DATA_CACHE_POLICY(0));
amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
}
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
uint64_t de_payload_addr, gds_addr, csa_addr;
int cnt_de;
union {
struct vi_de_ib_state regular;
struct vi_de_ib_state_chained_ib chained;
} de_payload = {};
csa_addr = amdgpu_csa_vaddr(ring->adev);
gds_addr = csa_addr + 4096;
if (ring->adev->virt.chained_ib_support) {
de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
} else {
de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(8) |
WR_CONFIRM) |
WRITE_DATA_CACHE_POLICY(0));
amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_9.h"
#include "amdgpu_ras.h"
#include "nbio/nbio_7_9_0_offset.h"
#include "nbio/nbio_7_9_0_sh_mask.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
#define NPS_MODE_MASK 0x000000FFL
/* Core 0 Port 0 counter */
#define smnPCIEP_NAK_COUNTER 0x1A340218
#define smnPCIE_PERF_CNTL_TXCLK3 0x1A38021c
#define smnPCIE_PERF_CNTL_TXCLK7 0x1A380888
#define smnPCIE_PERF_COUNT_CNTL 0x1A380200
#define smnPCIE_PERF_COUNT0_TXCLK3 0x1A380220
#define smnPCIE_PERF_COUNT0_TXCLK7 0x1A38088C
#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK3 0x1A3808F8
#define smnPCIE_PERF_COUNT0_UPVAL_TXCLK7 0x1A380918
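/* Remap the HDP MEM/REG flush control registers into the mmio remap window so
 * they are reachable at the KFD_MMIO_REMAP_* offsets.
 */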
static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, STRAP_ATI_REV_ID_DEV0_F0);
return tmp;
}
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
else
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}
static u32 nbio_v7_9_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}
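/* Program the doorbell range of an SDMA instance and route it to the matching
 * S2A doorbell entry of its AID, keyed on the instance index within the AID.
 */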
static void nbio_v7_9_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index, int doorbell_size)
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
int aid_id, dev_inst;
dev_inst = GET_INST(SDMA0, instance);
aid_id = adev->sdma.instance[instance].aid_id;
if (use_doorbell == false)
return;
doorbell_range =
REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY, doorbell_index);
doorbell_range =
REG_SET_FIELD(doorbell_range, DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY, doorbell_size);
doorbell_ctrl =
REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_ENABLE, 1);
doorbell_ctrl =
REG_SET_FIELD(doorbell_ctrl, S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE, doorbell_size);
switch (dev_inst % adev->sdma.num_inst_per_aid) {
case 0:
WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_1,
4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWID, 0xe);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xe);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x1);
WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_1_CTRL,
aid_id, doorbell_ctrl);
break;
case 1:
WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_2,
4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWID, 0x8);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x8);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x2);
WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_2_CTRL,
aid_id, doorbell_ctrl);
break;
case 2:
WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_3,
4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWID, 0x9);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x9);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x8);
WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_5_CTRL,
aid_id, doorbell_ctrl);
break;
case 3:
WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_4,
4 * aid_id, doorbell_range);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWID, 0xa);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0xa);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x9);
WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_6_CTRL,
aid_id, doorbell_ctrl);
break;
default:
break;
}
return;
}
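/* Program (or clear) the VCN doorbell range and its S2A routing for the given
 * AID instance.
 */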
static void nbio_v7_9_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
int doorbell_index, int instance)
{
u32 doorbell_range = 0, doorbell_ctrl = 0;
u32 aid_id = instance;
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
0x9);
if (aid_id)
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
DOORBELL0_FENCE_ENABLE_ENTRY,
0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_ENABLE, 1);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWID, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0x4);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE, 0x9);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0x4);
WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17,
aid_id, doorbell_range);
WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL,
aid_id, doorbell_ctrl);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0);
doorbell_ctrl = REG_SET_FIELD(doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE, 0);
WREG32_SOC15_OFFSET(NBIO, 0, regDOORBELL0_CTRL_ENTRY_17,
aid_id, doorbell_range);
WREG32_SOC15_EXT(NBIO, aid_id, regS2A_DOORBELL_ENTRY_4_CTRL,
aid_id, doorbell_ctrl);
}
}
static void nbio_v7_9_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
/* Enable to allow doorbell pass thru on pre-silicon bare-metal */
WREG32_SOC15(NBIO, 0, regBIFC_DOORBELL_ACCESS_EN_PF, 0xfffff);
WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
static void nbio_v7_9_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = 0;
if (enable) {
tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_EN, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_MODE, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_SIZE, 0);
WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
lower_32_bits(adev->doorbell.base));
WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
upper_32_bits(adev->doorbell.base));
}
WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}
static void nbio_v7_9_ih_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = 0, ih_doorbell_ctrl = 0;
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_OFFSET_ENTRY,
doorbell_index);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY,
0x8);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_ENABLE, 1);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWID, 0);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_OFFSET, 0);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE, 0x8);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, 0);
} else {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
DOORBELL0_CTRL_ENTRY_0,
BIF_DOORBELL0_RANGE_SIZE_ENTRY, 0);
ih_doorbell_ctrl = REG_SET_FIELD(ih_doorbell_ctrl,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE, 0);
}
WREG32_SOC15(NBIO, 0, regDOORBELL0_CTRL_ENTRY_0, ih_doorbell_range);
WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_3_CTRL, ih_doorbell_ctrl);
}
static void nbio_v7_9_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
}
static void nbio_v7_9_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
}
static void nbio_v7_9_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
}
static void nbio_v7_9_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
/* setup interrupt control */
WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl =
REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl =
REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
}
static u32 nbio_v7_9_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}
static u32 nbio_v7_9_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}
static u32 nbio_v7_9_get_pcie_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2);
}
static u32 nbio_v7_9_get_pcie_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2);
}
static u32 nbio_v7_9_get_pcie_index_hi_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2_HI);
}
const struct nbio_hdp_flush_reg nbio_v7_9_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
.ref_and_mask_sdma2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
.ref_and_mask_sdma3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
.ref_and_mask_sdma4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
.ref_and_mask_sdma5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
.ref_and_mask_sdma6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
.ref_and_mask_sdma7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};
static void nbio_v7_9_enable_doorbell_interrupt(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD15_PREREG(NBIO, 0, BIF_BX0_BIF_DOORBELL_INT_CNTL,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
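/* Read back the current compute partition mode from the partition compute
 * status register.
 */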
static int nbio_v7_9_get_compute_partition_mode(struct amdgpu_device *adev)
{
u32 tmp, px;
tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_COMPUTE_STATUS);
px = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_COMPUTE_STATUS,
PARTITION_MODE);
return px;
}
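/* Return the NPS (memory partition) mode as the position of the lowest bit set
 * in the status field; optionally report the supported modes through
 * @supp_modes.
 */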
static u32 nbio_v7_9_get_memory_partition_mode(struct amdgpu_device *adev,
u32 *supp_modes)
{
u32 tmp;
tmp = RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
tmp = REG_GET_FIELD(tmp, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);
if (supp_modes) {
*supp_modes =
RREG32_SOC15(NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_CAP);
}
return ffs(tmp);
}
static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
{
u32 inst_mask;
int i;
WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
0xff & ~(adev->gfx.xcc_mask));
WREG32_SOC15(NBIO, 0, regBIFC_GFX_INT_MONITOR_MASK, 0x7ff);
inst_mask = adev->aid_mask & ~1U;
for_each_inst(i, inst_mask) {
WREG32_SOC15_EXT(NBIO, i, regXCC_DOORBELL_FENCE, i,
XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
}
if (!amdgpu_sriov_vf(adev)) {
u32 baco_cntl;
for_each_inst(i, adev->aid_mask) {
baco_cntl = RREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL);
if (baco_cntl & (BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
BIF_BX0_BACO_CNTL__BACO_EN_MASK)) {
baco_cntl &= ~(
BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
BIF_BX0_BACO_CNTL__BACO_EN_MASK);
dev_dbg(adev->dev,
"Unsetting baco dummy mode %x",
baco_cntl);
WREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL,
baco_cntl);
}
}
}
}
static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
{
u32 val, nak_r, nak_g;
if (adev->flags & AMD_IS_APU)
return 0;
/* Get the number of NAKs received and generated */
val = RREG32_PCIE(smnPCIEP_NAK_COUNTER);
nak_r = val & 0xFFFF;
nak_g = val >> 16;
/* Add the total number of NAKs, i.e the number of replays */
return (nak_r + nak_g);
}
static void nbio_v7_9_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
uint32_t perfctrrx = 0;
uint32_t perfctrtx = 0;
/* This reports 0 on APUs, so return to avoid writing/reading registers
* that may or may not be different from their GPU counterparts
*/
if (adev->flags & AMD_IS_APU)
return;
/* Use TXCLK3 counter group for rx event */
/* Use TXCLK7 counter group for tx event */
/* Set the 2 events that we wish to watch, defined above */
/* 40 is event# for received msgs */
/* 2 is event# of posted requests sent */
perfctrrx = REG_SET_FIELD(perfctrrx, PCIE_PERF_CNTL_TXCLK3, EVENT0_SEL, 40);
perfctrtx = REG_SET_FIELD(perfctrtx, PCIE_PERF_CNTL_TXCLK7, EVENT0_SEL, 2);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctrrx);
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK7, perfctrtx);
/* Zero out and enable SHADOW_WR
* Write 0x6:
* Bit 1 = Global Shadow wr(1)
* Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
/* Enable Global Counter
* Write 0x1:
* Bit 0 = Global Counter Enable(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000001);
msleep(1000);
/* Disable Global Counter, Reset and enable SHADOW_WR
* Write 0x6:
* Bit 1 = Global Shadow wr(1)
* Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000006);
/* Get the upper and lower count */
*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) |
((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK3) << 32);
*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK7) |
((uint64_t)RREG32_PCIE(smnPCIE_PERF_COUNT0_UPVAL_TXCLK7) << 32);
}
const struct amdgpu_nbio_funcs nbio_v7_9_funcs = {
.get_hdp_flush_req_offset = nbio_v7_9_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_9_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_9_get_pcie_index_offset,
.get_pcie_data_offset = nbio_v7_9_get_pcie_data_offset,
.get_pcie_index_hi_offset = nbio_v7_9_get_pcie_index_hi_offset,
.get_rev_id = nbio_v7_9_get_rev_id,
.mc_access_enable = nbio_v7_9_mc_access_enable,
.get_memsize = nbio_v7_9_get_memsize,
.sdma_doorbell_range = nbio_v7_9_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_9_vcn_doorbell_range,
.enable_doorbell_aperture = nbio_v7_9_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_9_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_9_ih_doorbell_range,
.enable_doorbell_interrupt = nbio_v7_9_enable_doorbell_interrupt,
.update_medium_grain_clock_gating = nbio_v7_9_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_9_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_9_get_clockgating_state,
.ih_control = nbio_v7_9_ih_control,
.remap_hdp_registers = nbio_v7_9_remap_hdp_registers,
.get_compute_partition_mode = nbio_v7_9_get_compute_partition_mode,
.get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
.init_registers = nbio_v7_9_init_registers,
.get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count,
.get_pcie_usage = nbio_v7_9_get_pcie_usage,
};
static void nbio_v7_9_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
return;
}
static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
struct ras_err_data err_data = {0, 0, 0, NULL};
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
/* driver has to clear the interrupt status when bif ring is disabled */
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
BIF_BX0_BIF_DOORBELL_INT_CNTL,
RAS_CNTLR_INTERRUPT_CLEAR, 1);
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
if (!ras->disable_ras_err_cnt_harvest) {
/*
* clear error status after ras_controller_intr
* according to hw team and count ue number
* for query
*/
nbio_v7_9_query_ras_error_count(adev, &err_data);
/* logging on error cnt and printing for awareness */
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
if (err_data.ce_count)
dev_info(adev->dev, "%ld correctable hardware "
"errors detected in %s block, "
"no user action is needed.\n",
obj->err_data.ce_count,
get_ras_block_str(adev->nbio.ras_if));
if (err_data.ue_count)
dev_info(adev->dev, "%ld uncorrectable hardware "
"errors detected in %s block\n",
obj->err_data.ue_count,
get_ras_block_str(adev->nbio.ras_if));
}
dev_info(adev->dev, "RAS controller interrupt triggered "
"by NBIF error\n");
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
*/
amdgpu_ras_reset_gpu(adev);
}
}
static void nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
uint32_t bif_doorbell_intr_cntl;
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
/* driver has to clear the interrupt status when bif ring is disabled */
bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
BIF_BX0_BIF_DOORBELL_INT_CNTL,
RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
amdgpu_ras_global_ras_isr(adev);
}
}
static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
/* Dummy function, there is no initialization operation in driver */
return 0;
}
static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
/* By design, the ih cookie for ras_controller_irq should be written
 * to the BIF ring instead of the general iv ring. However, due to a known
 * bif ring hw bug, it has to be disabled. There is no chance the process
 * function will be invoked. Just leave it as a dummy one.
 */
return 0;
}
static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
/* Dummy function, there is no initialization operation in driver */
return 0;
}
static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
/* By design, the ih cookie for err_event_athub_irq should be written
 * to the BIF ring instead of the general iv ring. However, due to a known
 * bif ring hw bug, it has to be disabled. There is no chance the process
 * function will be invoked. Just leave it as a dummy one.
 */
return 0;
}
static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = {
.set = nbio_v7_9_set_ras_controller_irq_state,
.process = nbio_v7_9_process_ras_controller_irq,
};
static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = {
.set = nbio_v7_9_set_ras_err_event_athub_irq_state,
.process = nbio_v7_9_process_err_event_athub_irq,
};
static int nbio_v7_9_init_ras_controller_interrupt (struct amdgpu_device *adev)
{
int r;
/* init the irq funcs */
adev->nbio.ras_controller_irq.funcs =
&nbio_v7_9_ras_controller_irq_funcs;
adev->nbio.ras_controller_irq.num_types = 1;
/* register ras controller interrupt */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
&adev->nbio.ras_controller_irq);
return r;
}
static int nbio_v7_9_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev)
{
int r;
/* init the irq funcs */
adev->nbio.ras_err_event_athub_irq.funcs =
&nbio_v7_9_ras_err_event_athub_irq_funcs;
adev->nbio.ras_err_event_athub_irq.num_types = 1;
/* register ras err event athub interrupt */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
&adev->nbio.ras_err_event_athub_irq);
return r;
}
const struct amdgpu_ras_block_hw_ops nbio_v7_9_ras_hw_ops = {
.query_ras_error_count = nbio_v7_9_query_ras_error_count,
};
struct amdgpu_nbio_ras nbio_v7_9_ras = {
.ras_block = {
.ras_comm = {
.name = "pcie_bif",
.block = AMDGPU_RAS_BLOCK__PCIE_BIF,
.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
},
.hw_ops = &nbio_v7_9_ras_hw_ops,
.ras_late_init = amdgpu_nbio_ras_late_init,
},
.handle_ras_controller_intr_no_bifring = nbio_v7_9_handle_ras_controller_intr_no_bifring,
.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring,
.init_ras_controller_interrupt = nbio_v7_9_init_ras_controller_interrupt,
.init_ras_err_event_athub_interrupt = nbio_v7_9_init_ras_err_event_athub_interrupt,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"
#include "amdgpu_reset.h"
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
/*
 * this peek_msg could *only* be called in an IRQ routine because in an IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL must already
 * have been set to 1 by the host.
 *
 * if called outside an IRQ routine, this peek_msg is not guaranteed to return
 * the correct value since RCV_DW0 only holds a valid message while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
u32 reg;
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
if (reg != event)
return -ENOENT;
xgpu_ai_mailbox_send_ack(adev);
return 0;
}
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev) {
return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
u8 reg;
do {
reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
if (reg & 2)
return 0;
mdelay(5);
timeout -= 5;
} while (timeout > 1);
pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);
return -ETIME;
}
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;
do {
r = xgpu_ai_mailbox_rcv_msg(adev, event);
if (!r)
return 0;
msleep(10);
timeout -= 10;
} while (timeout > 1);
pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return -ETIME;
}
static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3) {
u32 reg;
int r;
uint8_t trn;
/* IMPORTANT:
 * clear TRN_MSG_VALID to clear the host's RCV_MSG_ACK; with the host's
 * RCV_MSG_ACK cleared, hw automatically clears the VF's TRN_MSG_ACK,
 * otherwise the xgpu_ai_poll_ack() below would return immediately
 */
do {
xgpu_ai_mailbox_set_valid(adev, false);
trn = xgpu_ai_peek_ack(adev);
if (trn) {
pr_err("trn=%x ACK should not assert! wait again !\n", trn);
msleep(1);
}
} while(trn);
reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
MSGBUF_DATA, req);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
reg);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
data1);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
data2);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
data3);
xgpu_ai_mailbox_set_valid(adev, true);
/* start to poll ack */
r = xgpu_ai_poll_ack(adev);
if (r)
pr_err("Doesn't get ack from pf, continue\n");
xgpu_ai_mailbox_set_valid(adev, false);
}
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
int r;
xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
/* start to check msg if request is idh_req_gpu_init_access */
if (req == IDH_REQ_GPU_INIT_ACCESS ||
req == IDH_REQ_GPU_FINI_ACCESS ||
req == IDH_REQ_GPU_RESET_ACCESS) {
r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
if (r) {
pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
return r;
}
/* Retrieve checksum from mailbox2 */
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
}
} else if (req == IDH_REQ_GPU_INIT_DATA){
/* Dummy REQ_GPU_INIT_DATA handling */
r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
/* version set to 0 since dummy */
adev->virt.req_init_data_ver = 0;
}
return 0;
}
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
int ret, i = 0;
while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
if (!ret)
break;
i++;
}
return ret;
}
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
enum idh_request req;
req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
return xgpu_ai_send_access_requests(adev, req);
}
static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
enum idh_request req;
int r = 0;
req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
r = xgpu_ai_send_access_requests(adev, req);
return r;
}
static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("get ack intr and do nothing.\n");
return 0;
}
static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
return 0;
}
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
/* block amdgpu_gpu_recover till the FLR COMPLETE msg is received,
 * otherwise the mailbox msg will be ruined/reset by
* the VF FLR.
*/
if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
return;
down_write(&adev->reset_domain->sem);
amdgpu_virt_fini_data_exchange(adev);
xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
do {
if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
msleep(10);
timeout -= 10;
} while (timeout > 1);
flr_done:
atomic_set(&adev->reset_domain->in_gpu_reset, 0);
up_write(&adev->reset_domain->sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
(state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
return 0;
}
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);
switch (event) {
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
&adev->virt.flr_work),
"Failed to queue work! at %s",
__func__);
break;
case IDH_QUERY_ALIVE:
xgpu_ai_mailbox_send_ack(adev);
break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
 * ignore it here since the polling thread will handle it;
 * other msgs like flr complete are not handled here.
 */
case IDH_CLR_MSG_BUF:
case IDH_FLR_NOTIFICATION_CMPL:
case IDH_READY_TO_ACCESS_GPU:
default:
break;
}
return 0;
}
static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
.set = xgpu_ai_set_mailbox_ack_irq,
.process = xgpu_ai_mailbox_ack_irq,
};
static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
.set = xgpu_ai_set_mailbox_rcv_irq,
.process = xgpu_ai_mailbox_rcv_irq,
};
void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
adev->virt.ack_irq.num_types = 1;
adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
adev->virt.rcv_irq.num_types = 1;
adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
if (r)
return r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
}
return 0;
}
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
int r;
r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
return 0;
}
void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
{
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
{
xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
}
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.req_init_data = xgpu_ai_request_init_data,
.ras_poison_handler = xgpu_ai_ras_poison_handler,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/slab.h>
#include <drm/drm_print.h>
#include "amdgpu_ring_mux.h"
#include "amdgpu_ring.h"
#include "amdgpu.h"
#define AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT (HZ / 2)
#define AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US 10000
static const struct ring_info {
unsigned int hw_pio;
const char *ring_name;
} sw_ring_info[] = {
{ AMDGPU_RING_PRIO_DEFAULT, "gfx_low"},
{ AMDGPU_RING_PRIO_2, "gfx_high"},
};
static struct kmem_cache *amdgpu_mux_chunk_slab;
static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
struct amdgpu_ring *ring)
{
return ring->entry_index < mux->ring_entry_size ?
&mux->ring_entry[ring->entry_index] : NULL;
}
/* copy packages from the sw ring range [s_start, s_end) */
static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
struct amdgpu_ring *ring,
u64 s_start, u64 s_end)
{
u64 start, end;
struct amdgpu_ring *real_ring = mux->real_ring;
start = s_start & ring->buf_mask;
end = s_end & ring->buf_mask;
if (start == end) {
DRM_ERROR("no more data copied from sw ring\n");
return;
}
if (start > end) {
amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
(ring->ring_size >> 2) - start);
amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
} else {
amdgpu_ring_alloc(real_ring, end - start);
amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);
}
}
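/* Resubmit the chunks of the low priority software ring whose fences have not
 * signaled yet, patching the cntl/CE/DE packets of the preempted chunk before
 * copying them back onto the real ring.
 */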
static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
{
struct amdgpu_mux_entry *e = NULL;
struct amdgpu_mux_chunk *chunk;
uint32_t seq, last_seq;
int i;
/* find low priority entries */
if (!mux->s_resubmit)
return;
for (i = 0; i < mux->num_ring_entries; i++) {
if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
e = &mux->ring_entry[i];
break;
}
}
if (!e) {
DRM_ERROR("%s no low priority ring found\n", __func__);
return;
}
last_seq = atomic_read(&e->ring->fence_drv.last_seq);
seq = mux->seqno_to_resubmit;
if (last_seq < seq) {
/* resubmit all the fences between (last_seq, seq] */
list_for_each_entry(chunk, &e->list, entry) {
if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
amdgpu_fence_update_start_timestamp(e->ring,
chunk->sync_seq,
ktime_get());
if (chunk->sync_seq ==
le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
if (chunk->cntl_offset <= e->ring->buf_mask)
amdgpu_ring_patch_cntl(e->ring,
chunk->cntl_offset);
if (chunk->ce_offset <= e->ring->buf_mask)
amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
if (chunk->de_offset <= e->ring->buf_mask)
amdgpu_ring_patch_de(e->ring, chunk->de_offset);
}
amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
chunk->start,
chunk->end);
mux->wptr_resubmit = chunk->end;
amdgpu_ring_commit(mux->real_ring);
}
}
}
del_timer(&mux->resubmit_timer);
mux->s_resubmit = false;
}
static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
{
mod_timer(&mux->resubmit_timer, jiffies + AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT);
}
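/* Timer fallback: resubmit any pending chunks, or reschedule if the mux lock
 * is currently contended.
 */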
static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
{
struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer);
if (!spin_trylock(&mux->lock)) {
amdgpu_ring_mux_schedule_resubmit(mux);
DRM_ERROR("reschedule resubmit\n");
return;
}
amdgpu_mux_resubmit_chunks(mux);
spin_unlock(&mux->lock);
}
int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
unsigned int entry_size)
{
mux->real_ring = ring;
mux->num_ring_entries = 0;
mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry), GFP_KERNEL);
if (!mux->ring_entry)
return -ENOMEM;
mux->ring_entry_size = entry_size;
mux->s_resubmit = false;
amdgpu_mux_chunk_slab = kmem_cache_create("amdgpu_mux_chunk",
sizeof(struct amdgpu_mux_chunk), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!amdgpu_mux_chunk_slab) {
DRM_ERROR("create amdgpu_mux_chunk cache failed\n");
return -ENOMEM;
}
spin_lock_init(&mux->lock);
timer_setup(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, 0);
return 0;
}
void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux)
{
struct amdgpu_mux_entry *e;
struct amdgpu_mux_chunk *chunk, *chunk2;
int i;
for (i = 0; i < mux->num_ring_entries; i++) {
e = &mux->ring_entry[i];
list_for_each_entry_safe(chunk, chunk2, &e->list, entry) {
list_del(&chunk->entry);
kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
}
}
kmem_cache_destroy(amdgpu_mux_chunk_slab);
kfree(mux->ring_entry);
mux->ring_entry = NULL;
mux->num_ring_entries = 0;
mux->ring_entry_size = 0;
}
int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
if (mux->num_ring_entries >= mux->ring_entry_size) {
DRM_ERROR("add sw ring exceeding max entry size\n");
return -ENOENT;
}
e = &mux->ring_entry[mux->num_ring_entries];
ring->entry_index = mux->num_ring_entries;
e->ring = ring;
INIT_LIST_HEAD(&e->list);
mux->num_ring_entries += 1;
return 0;
}
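/* Advance the software ring wptr and copy the newly written packages onto the
 * real ring, skipping anything already copied by a pending resubmit.
 */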
void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
{
struct amdgpu_mux_entry *e;
spin_lock(&mux->lock);
if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
amdgpu_mux_resubmit_chunks(mux);
e = amdgpu_ring_mux_sw_entry(mux, ring);
if (!e) {
DRM_ERROR("cannot find entry for sw ring\n");
spin_unlock(&mux->lock);
return;
}
/* We could skip this set wptr as preemption is in progress. */
if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) {
spin_unlock(&mux->lock);
return;
}
e->sw_cptr = e->sw_wptr;
/* Update cptr if the packages were already copied by the resubmit function */
if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
e->sw_cptr = mux->wptr_resubmit;
e->sw_wptr = wptr;
e->start_ptr_in_hw_ring = mux->real_ring->wptr;
/* Skip copying for the packages already resubmitted. */
if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
e->end_ptr_in_hw_ring = mux->real_ring->wptr;
amdgpu_ring_commit(mux->real_ring);
} else {
e->end_ptr_in_hw_ring = mux->real_ring->wptr;
}
spin_unlock(&mux->lock);
}
u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
e = amdgpu_ring_mux_sw_entry(mux, ring);
if (!e) {
DRM_ERROR("cannot find entry for sw ring\n");
return 0;
}
return e->sw_wptr;
}
/**
* amdgpu_ring_mux_get_rptr - get the readptr of the software ring
* @mux: the multiplexer the software rings attach to
* @ring: the software ring of which we calculate the readptr
*
 * The return value of the readptr is not precise while the other rings could
 * write data onto the real ring buffer. After overwriting on the real ring, we
 * cannot decide if our packages have been executed or not read yet. However,
 * this function is only called by tools such as umr to collect the latest
 * packages for hang analysis. We assume the hang happens near our latest
 * submit. Thus we could use the following logic to give the clue:
* If the readptr is between start and end, then we return the copy pointer
* plus the distance from start to readptr. If the readptr is before start, we
* return the copy pointer. Lastly, if the readptr is past end, we return the
* write pointer.
*/
u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
u64 readp, offset, start, end;
e = amdgpu_ring_mux_sw_entry(mux, ring);
if (!e) {
DRM_ERROR("no sw entry found!\n");
return 0;
}
readp = amdgpu_ring_get_rptr(mux->real_ring);
start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask;
end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask;
if (start > end) {
if (readp <= end)
readp += mux->real_ring->ring_size >> 2;
end += mux->real_ring->ring_size >> 2;
}
if (start <= readp && readp <= end) {
offset = readp - start;
e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
} else if (readp < start) {
e->sw_rptr = e->sw_cptr;
} else {
/* end < readptr */
e->sw_rptr = e->sw_wptr;
}
return e->sw_rptr;
}
u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
return amdgpu_ring_mux_get_rptr(mux, ring);
}
u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
return amdgpu_ring_mux_get_wptr(mux, ring);
}
void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr);
}
/* Override insert_nop to prevent emitting nops to the software rings */
void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
WARN_ON(!ring->is_sw_ring);
}
const char *amdgpu_sw_ring_name(int idx)
{
return idx < ARRAY_SIZE(sw_ring_info) ?
sw_ring_info[idx].ring_name : NULL;
}
unsigned int amdgpu_sw_ring_priority(int idx)
{
return idx < ARRAY_SIZE(sw_ring_info) ?
sw_ring_info[idx].hw_pio : AMDGPU_RING_PRIO_DEFAULT;
}
/* Scan whether any low prio ring has an unsignaled fence while the high prio rings have no fences pending. */
static int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux)
{
struct amdgpu_ring *ring;
int i, need_preempt;
need_preempt = 0;
for (i = 0; i < mux->num_ring_entries; i++) {
ring = mux->ring_entry[i].ring;
if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
amdgpu_fence_count_emitted(ring) > 0)
return 0;
if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
amdgpu_fence_last_unsignaled_time_us(ring) >
AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US)
need_preempt = 1;
}
return need_preempt && !mux->s_resubmit;
}
/* Trigger Mid-Command Buffer Preemption (MCBP) and find if we need to resubmit. */
static int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux)
{
int r;
spin_lock(&mux->lock);
mux->pending_trailing_fence_signaled = true;
r = amdgpu_ring_preempt_ib(mux->real_ring);
spin_unlock(&mux->lock);
return r;
}
void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
if (amdgpu_mcbp_scan(mux) > 0)
amdgpu_mcbp_trigger_preempt(mux);
return;
}
amdgpu_ring_mux_start_ib(mux, ring);
}
void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
WARN_ON(!ring->is_sw_ring);
if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
return;
amdgpu_ring_mux_end_ib(mux, ring);
}
void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
unsigned offset;
if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
return;
offset = ring->wptr & ring->buf_mask;
amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
}
void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
struct amdgpu_mux_chunk *chunk;
spin_lock(&mux->lock);
amdgpu_mux_resubmit_chunks(mux);
spin_unlock(&mux->lock);
e = amdgpu_ring_mux_sw_entry(mux, ring);
if (!e) {
DRM_ERROR("cannot find entry!\n");
return;
}
chunk = kmem_cache_alloc(amdgpu_mux_chunk_slab, GFP_KERNEL);
if (!chunk) {
DRM_ERROR("alloc amdgpu_mux_chunk_slab failed\n");
return;
}
chunk->start = ring->wptr;
/* Initialize the offsets to an out-of-range value so we can tell whether the IB submission set them. */
chunk->cntl_offset = ring->buf_mask + 1;
chunk->de_offset = ring->buf_mask + 1;
chunk->ce_offset = ring->buf_mask + 1;
list_add_tail(&chunk->entry, &e->list);
}
static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
uint32_t last_seq = 0;
struct amdgpu_mux_entry *e;
struct amdgpu_mux_chunk *chunk, *tmp;
e = amdgpu_ring_mux_sw_entry(mux, ring);
if (!e) {
DRM_ERROR("cannot find entry!\n");
return;
}
last_seq = atomic_read(&ring->fence_drv.last_seq);
list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
if (chunk->sync_seq <= last_seq) {
list_del(&chunk->entry);
kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
}
}
}
void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
struct amdgpu_ring *ring, u64 offset,
enum amdgpu_ring_mux_offset_type type)
{
struct amdgpu_mux_entry *e;
struct amdgpu_mux_chunk *chunk;
e = amdgpu_ring_mux_sw_entry(mux, ring);
if (!e) {
DRM_ERROR("cannot find entry!\n");
return;
}
chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
if (!chunk) {
DRM_ERROR("cannot find chunk!\n");
return;
}
switch (type) {
case AMDGPU_MUX_OFFSET_TYPE_CONTROL:
chunk->cntl_offset = offset;
break;
case AMDGPU_MUX_OFFSET_TYPE_DE:
chunk->de_offset = offset;
break;
case AMDGPU_MUX_OFFSET_TYPE_CE:
chunk->ce_offset = offset;
break;
default:
DRM_ERROR("invalid type (%d)\n", type);
break;
}
}
void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
{
struct amdgpu_mux_entry *e;
struct amdgpu_mux_chunk *chunk;
e = amdgpu_ring_mux_sw_entry(mux, ring);
if (!e) {
DRM_ERROR("cannot find entry!\n");
return;
}
chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
if (!chunk) {
DRM_ERROR("cannot find chunk!\n");
return;
}
chunk->end = ring->wptr;
chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
scan_and_remove_signaled_chunk(mux, ring);
}
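/*
 * amdgpu_mcbp_handle_trailing_fence_irq - handle the trailing fence of a
 * preemption request. Once the trailing fence value has landed in memory,
 * process the fences of the low-priority ring and, if unsignaled work
 * remains, remember its sequence number and schedule a resubmit.
 */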
bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux)
{
struct amdgpu_mux_entry *e;
struct amdgpu_ring *ring = NULL;
int i;
if (!mux->pending_trailing_fence_signaled)
return false;
if (mux->real_ring->trail_seq != le32_to_cpu(*mux->real_ring->trail_fence_cpu_addr))
return false;
for (i = 0; i < mux->num_ring_entries; i++) {
e = &mux->ring_entry[i];
if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
ring = e->ring;
break;
}
}
if (!ring) {
DRM_ERROR("cannot find low priority ring\n");
return false;
}
amdgpu_fence_process(ring);
if (amdgpu_fence_count_emitted(ring) > 0) {
mux->s_resubmit = true;
mux->seqno_to_resubmit = ring->fence_drv.sync_seq;
amdgpu_ring_mux_schedule_resubmit(mux);
}
mux->pending_trailing_fence_signaled = false;
return true;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
 * Author: [email protected]
*/
#include <drm/drm_exec.h>
#include "amdgpu.h"
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
addr -= AMDGPU_VA_RESERVED_SIZE;
addr = amdgpu_gmc_sign_extend(addr);
return addr;
}
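/*
 * Worked example of amdgpu_csa_vaddr() with illustrative numbers (the
 * reservation size is assumed here; see AMDGPU_VA_RESERVED_SIZE for the
 * real value): with max_pfn = 1ULL << 36 and 4 KiB pages the top of the VA
 * space is 1ULL << 48; subtracting an assumed 2 MiB reservation gives
 * 0xffffffe00000, which sign-extends (bit 47 set) to 0xffffffffffe00000.
 */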
int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
u32 domain, uint32_t size)
{
void *ptr;
amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
domain, bo,
NULL, &ptr);
if (!*bo)
return -ENOMEM;
memset(ptr, 0, size);
adev->virt.csa_cpu_addr = ptr;
return 0;
}
void amdgpu_free_static_csa(struct amdgpu_bo **bo)
{
amdgpu_bo_free_kernel(bo, NULL, NULL);
}
/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 * It maps the virtual address amdgpu_csa_vaddr() into this VM, and each GFX
 * command submission should use this virtual address within its META_DATA
 * init package to support SRIOV GFX preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
uint64_t csa_addr, uint32_t size)
{
struct drm_exec exec;
int r;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
if (unlikely(r)) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
goto error;
}
}
*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
if (!*bo_va) {
r = -ENOMEM;
goto error;
}
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
goto error;
}
error:
drm_exec_fini(&exec);
return r;
}
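/*
 * Typical usage of amdgpu_map_static_csa() during VM init is roughly the
 * sketch below; the exact callers, AMDGPU_CSA_SIZE and the
 * AMDGPU_GMC_HOLE_MASK masking live outside this file and may differ:
 *
 *	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 *	struct amdgpu_bo_va *csa_va;
 *
 *	r = amdgpu_map_static_csa(adev, vm, adev->virt.csa_obj, &csa_va,
 *				  csa_addr, AMDGPU_CSA_SIZE);
 */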
int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
uint64_t csa_addr)
{
struct drm_exec exec;
int r;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
drm_exec_until_all_locked(&exec) {
r = amdgpu_vm_lock_pd(vm, &exec, 0);
if (likely(!r))
r = drm_exec_lock_obj(&exec, &bo->tbo.base);
drm_exec_retry_on_contention(&exec);
if (unlikely(r)) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
goto error;
}
}
r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
if (r) {
DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
goto error;
}
amdgpu_vm_bo_del(adev, bo_va);
error:
drm_exec_fini(&exec);
return r;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "amdgpu_vm.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
/**
* amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
*
* @table: newly allocated or validated PD/PT
*/
static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
{
table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
return amdgpu_bo_kmap(&table->bo, NULL);
}
/**
* amdgpu_vm_cpu_prepare - prepare page table update with the CPU
*
* @p: see amdgpu_vm_update_params definition
* @resv: reservation object with embedded fence
* @sync_mode: synchronization mode
*
* Returns:
 * Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
if (!resv)
return 0;
return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
}
/**
* amdgpu_vm_cpu_update - helper to update page tables via CPU
*
* @p: see amdgpu_vm_update_params definition
* @vmbo: PD/PT to update
* @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: hw access flags
*
* Write count number of PT/PD entries directly.
*/
static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *vmbo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
unsigned int i;
uint64_t value;
long r;
r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
true, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
amdgpu_vm_map_gart(p->pages_addr, addr) :
addr;
amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
i, value, flags);
addr += incr;
}
return 0;
}
/**
* amdgpu_vm_cpu_commit - commit page table update to the HW
*
* @p: see amdgpu_vm_update_params definition
* @fence: unused
*
* Make sure that the hardware sees the page table updates.
*/
static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
/* Flush HDP */
mb();
amdgpu_device_flush_hdp(p->adev, NULL);
return 0;
}
const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
.map_table = amdgpu_vm_cpu_map_table,
.prepare = amdgpu_vm_cpu_prepare,
.update = amdgpu_vm_cpu_update,
.commit = amdgpu_vm_cpu_commit
};
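/*
 * The VM code drives the amdgpu_vm_cpu_funcs callbacks in order:
 * map_table() on a newly allocated or validated page table, prepare() to
 * wait for the reservation fences, one or more update() calls to write the
 * entries, and finally commit() to make the writes visible to the hardware.
 */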
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
#include <linux/firmware.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V4_0_FW_SIZE (384 * 1024)
#define VCE_V4_0_STACK_SIZE (64 * 1024)
#define VCE_V4_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
/**
* vce_v4_0_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
}
/**
* vce_v4_0_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
if (ring->me == 0)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
else if (ring->me == 1)
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
else
return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
}
/**
* vce_v4_0_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
return;
}
if (ring->me == 0)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
lower_32_bits(ring->wptr));
else if (ring->me == 1)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
lower_32_bits(ring->wptr));
else
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
lower_32_bits(ring->wptr));
}
static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
{
int i, j;
for (i = 0; i < 10; ++i) {
for (j = 0; j < 100; ++j) {
uint32_t status =
RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));
if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
return 0;
mdelay(10);
}
DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
mdelay(10);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
mdelay(10);
}
return -ETIMEDOUT;
}
static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
struct amdgpu_mm_table *table)
{
uint32_t data = 0, loop;
uint64_t addr = table->gpu_addr;
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
uint32_t size;
size = header->header_size + header->vce_table_size + header->uvd_table_size;
/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));
/* 2, update vmid of descriptor */
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);
/* 3, notify mmsch about the size of this descriptor */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);
/* 4, set resp to zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
*adev->vce.ring[0].wptr_cpu_addr = 0;
adev->vce.ring[0].wptr = 0;
adev->vce.ring[0].wptr_old = 0;
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
loop = 1000;
while ((data & 0x10000002) != 0x10000002) {
udelay(10);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
loop--;
if (!loop)
break;
}
if (!loop) {
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
return 0;
}
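/*
 * vce_v4_0_mmsch_start() handshake summary: the host publishes the
 * descriptor address and VMID, tells the MMSCH how large the descriptor is,
 * clears the response mailbox, writes 0x10000001 to the host mailbox to
 * kick off initialization, and then polls the response mailbox (up to 1000
 * iterations of 10 us) for the 0x10000002 completion pattern before
 * returning success or -EBUSY.
 */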
static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
uint32_t offset, size;
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
struct mmsch_v1_0_cmd_end end = { { 0 } };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
if (header->vce_table_offset == 0 && header->vce_table_size == 0) {
header->version = MMSCH_VERSION;
header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;
if (header->uvd_table_offset == 0 && header->uvd_table_size == 0)
header->vce_table_offset = header->header_size;
else
header->vce_table_offset = header->uvd_table_size + header->uvd_table_offset;
init_table += header->vce_table_offset;
ring = &adev->vce.ring[0];
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
ring->ring_size / 4);
/* begin of MC_RESUME */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
offset = AMDGPU_VCE_FIRMWARE_OFFSET;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(tmr_mc_addr >> 40) & 0xff);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
adev->vce.gpu_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(adev->vce.gpu_addr >> 40) & 0xff);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
offset & ~0x0f000000);
}
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
adev->vce.gpu_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR1),
(adev->vce.gpu_addr >> 40) & 0xff);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
adev->vce.gpu_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
(adev->vce.gpu_addr >> 40) & 0xff);
size = VCE_V4_0_FW_SIZE;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
size = VCE_V4_0_STACK_SIZE;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
(offset & ~0x0f000000) | (1 << 24));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
offset += size;
size = VCE_V4_0_DATA_SIZE;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
(offset & ~0x0f000000) | (2 << 24));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
/* end of MC_RESUME */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);
/* clear BUSY flag */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
~VCE_STATUS__JOB_BUSY_MASK, 0);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->vce_table_size = table_size;
}
return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
* vce_v4_0_start - start VCE block
*
* @adev: amdgpu_device pointer
*
* Setup and start the VCE block
*/
static int vce_v4_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
int r;
ring = &adev->vce.ring[0];
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);
ring = &adev->vce.ring[1];
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);
ring = &adev->vce.ring[2];
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);
vce_v4_0_mc_resume(adev);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
~VCE_STATUS__JOB_BUSY_MASK);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
mdelay(100);
r = vce_v4_0_firmware_loaded(adev);
/* clear BUSY flag */
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
if (r) {
DRM_ERROR("VCE not responding, giving up!!!\n");
return r;
}
return 0;
}
static int vce_v4_0_stop(struct amdgpu_device *adev)
{
/* Disable VCPU */
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
/* hold on ECPU */
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
/* clear VCE_STATUS */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);
/* Set Clock-Gating off */
/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
vce_v4_0_set_vce_sw_clock_gating(adev, false);
*/
return 0;
}
static int vce_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev)) /* currently only VCE0 supports SRIOV */
adev->vce.num_rings = 1;
else
adev->vce.num_rings = 3;
vce_v4_0_set_ring_funcs(adev);
vce_v4_0_set_irq_funcs(adev);
return 0;
}
static int vce_v4_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
unsigned size;
int r, i;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
if (r)
return r;
size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
size += VCE_V4_0_FW_SIZE;
r = amdgpu_vce_sw_init(adev, size);
if (r)
return r;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vce.saved_bo)
return -ENOMEM;
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
DRM_INFO("PSP loading VCE firmware\n");
} else {
r = amdgpu_vce_resume(adev);
if (r)
return r;
}
for (i = 0; i < adev->vce.num_rings; i++) {
enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
ring = &adev->vce.ring[i];
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "vce%d", i);
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
ring->use_doorbell = true;
/* currently only the first encoding ring is used under SRIOV,
 * so point the remaining rings at an unused doorbell location.
 */
if (i == 0)
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
hw_prio, NULL);
if (r)
return r;
}
r = amdgpu_vce_entity_init(adev);
if (r)
return r;
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
return r;
}
static int vce_v4_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* free MM table */
amdgpu_virt_free_mm_table(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
kvfree(adev->vce.saved_bo);
adev->vce.saved_bo = NULL;
}
r = amdgpu_vce_suspend(adev);
if (r)
return r;
return amdgpu_vce_sw_fini(adev);
}
static int vce_v4_0_hw_init(void *handle)
{
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
r = vce_v4_0_sriov_start(adev);
else
r = vce_v4_0_start(adev);
if (r)
return r;
for (i = 0; i < adev->vce.num_rings; i++) {
r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
}
DRM_INFO("VCE initialized successfully.\n");
return 0;
}
static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vce.idle_work);
if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
vce_v4_0_stop(adev);
} else {
/* full access mode, so don't touch any VCE register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
return 0;
}
static int vce_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r, idx;
if (adev->vce.vcpu_bo == NULL)
return 0;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
void *ptr = adev->vce.cpu_addr;
memcpy_fromio(adev->vce.saved_bo, ptr, size);
}
drm_dev_exit(idx);
}
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
* - enable powergating
* - enable clockgating
* - disable dpm
*
* TODO: to align with the VCN implementation, move the
* jobs for clockgating/powergating/dpm setting to
* ->set_powergating_state().
*/
cancel_delayed_work_sync(&adev->vce.idle_work);
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
}
r = vce_v4_0_hw_fini(adev);
if (r)
return r;
return amdgpu_vce_suspend(adev);
}
static int vce_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r, idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
void *ptr = adev->vce.cpu_addr;
memcpy_toio(ptr, adev->vce.saved_bo, size);
drm_dev_exit(idx);
}
} else {
r = amdgpu_vce_resume(adev);
if (r)
return r;
}
return vce_v4_0_hw_init(adev);
}
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t offset, size;
uint64_t tmr_mc_addr;
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
offset = AMDGPU_VCE_FIRMWARE_OFFSET;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
tmr_mc_addr = (uint64_t)(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi) << 32 |
adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
(tmr_mc_addr >> 8));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(tmr_mc_addr >> 40) & 0xff);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
} else {
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
(adev->vce.gpu_addr >> 8));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(adev->vce.gpu_addr >> 40) & 0xff);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
}
size = VCE_V4_0_FW_SIZE;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
size = VCE_V4_0_STACK_SIZE;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
offset += size;
size = VCE_V4_0_DATA_SIZE;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
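/*
 * VCPU cache window layout programmed by vce_v4_0_mc_resume(): window 0
 * holds the firmware image (VCE_V4_0_FW_SIZE, fetched from the PSP TMR when
 * PSP loads the firmware, otherwise from the VCE BO), window 1 holds the
 * stack (VCE_V4_0_STACK_SIZE) and window 2 the data segment
 * (VCE_V4_0_DATA_SIZE), both offset within the VCE BO.
 */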
static int vce_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
/* needed for driver unload */
return 0;
}
#if 0
static bool vce_v4_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 mask = 0;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;
return !(RREG32(mmSRBM_STATUS2) & mask);
}
static int vce_v4_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++)
if (vce_v4_0_is_idle(handle))
return 0;
return -ETIMEDOUT;
}
#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
static bool vce_v4_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
/* According to the VCE team, we should use VCE_STATUS instead of
 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
 * instance's registers are accessed
 * (0 for the 1st instance, 0x10 for the 2nd instance).
 *
 * VCE_STATUS
 * |UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
 * |----+----+-----------+----+----+----+----------+---------+----|
 * |bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
 *
 * The VCE team suggests using bit 3 to bit 6 for the busy status check.
 */
mutex_lock(&adev->grbm_idx_mutex);
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
adev->vce.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->vce.srbm_soft_reset = 0;
return false;
}
}
static int vce_v4_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
if (!adev->vce.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->vce.srbm_soft_reset;
if (srbm_soft_reset) {
u32 tmp;
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int vce_v4_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
return vce_v4_0_suspend(adev);
}
static int vce_v4_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
return vce_v4_0_resume(adev);
}
static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
u32 tmp, data;
tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
if (override)
data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
else
data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
if (tmp != data)
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
}
static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
bool gated)
{
u32 data;
/* Set Override to disable Clock Gating */
vce_v4_0_override_vce_clock_gating(adev, true);
/* This function enables MGCG which is controlled by firmware.
With the clocks in the gated state the core is still
accessible but the firmware will throttle the clocks on the
fly as necessary.
*/
if (gated) {
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
data |= 0x1ff;
data &= ~0xef0000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
data |= 0x3ff000;
data &= ~0xffc00000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
data |= 0x2;
data &= ~0x00010000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
data |= 0x37f;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
} else {
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
data &= ~0x80010;
data |= 0xe70008;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
data |= 0xffc00000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
data |= 0x10000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
data &= ~0xffc00000;
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8);
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
}
vce_v4_0_override_vce_clock_gating(adev, false);
}
static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
if (enable)
tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
else
tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
static int vce_v4_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
int i;
if ((adev->asic_type == CHIP_POLARIS10) ||
(adev->asic_type == CHIP_TONGA) ||
(adev->asic_type == CHIP_FIJI))
vce_v4_0_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < 2; i++) {
/* Program VCE Instance 0 or 1 if not harvested */
if (adev->vce.harvest_config & (1 << i))
continue;
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
data &= ~(0xf | 0xff0);
data |= ((0x0 << 0) | (0x04 << 4));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);
/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
data &= ~(0xf | 0xff0);
data |= ((0x0 << 0) | (0x04 << 4));
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
}
vce_v4_0_set_vce_sw_clock_gating(adev, enable);
}
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
#endif
static int vce_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* This doesn't actually powergate the VCE block.
* That's done in the dpm code via the SMC. This
* just re-inits the block as necessary. The actual
* gating still happens in the dpm code. We should
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (state == AMD_PG_STATE_GATE)
return vce_v4_0_stop(adev);
else
return vce_v4_0_start(adev);
}
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct amdgpu_ib *ib, uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, VCE_CMD_FENCE);
amdgpu_ring_write(ring, addr);
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, VCE_CMD_TRAP);
}
static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, VCE_CMD_END);
}
static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring, val);
}
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for reg writes */
vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
vmid * hub->ctx_addr_distance,
lower_32_bits(pd_addr), 0xffffffff);
}
static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, val);
}
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
uint32_t val = 0;
if (!amdgpu_sriov_vf(adev)) {
if (state == AMDGPU_IRQ_STATE_ENABLE)
val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
return 0;
}
static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: VCE\n");
switch (entry->src_data[0]) {
case 0:
case 1:
case 2:
amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
const struct amd_ip_funcs vce_v4_0_ip_funcs = {
.name = "vce_v4_0",
.early_init = vce_v4_0_early_init,
.late_init = NULL,
.sw_init = vce_v4_0_sw_init,
.sw_fini = vce_v4_0_sw_fini,
.hw_init = vce_v4_0_hw_init,
.hw_fini = vce_v4_0_hw_fini,
.suspend = vce_v4_0_suspend,
.resume = vce_v4_0_resume,
.is_idle = NULL /* vce_v4_0_is_idle */,
.wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
.check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
.pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
.soft_reset = NULL /* vce_v4_0_soft_reset */,
.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
.set_clockgating_state = vce_v4_0_set_clockgating_state,
.set_powergating_state = vce_v4_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCE,
.align_mask = 0x3f,
.nop = VCE_CMD_NO_OP,
.support_64bit_ptrs = false,
.no_user_fence = true,
.get_rptr = vce_v4_0_ring_get_rptr,
.get_wptr = vce_v4_0_ring_get_wptr,
.set_wptr = vce_v4_0_ring_set_wptr,
.parse_cs = amdgpu_vce_ring_parse_cs_vm,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
4 + /* vce_v4_0_emit_vm_flush */
5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
1, /* vce_v4_0_ring_insert_end */
.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
.emit_ib = vce_v4_0_ring_emit_ib,
.emit_vm_flush = vce_v4_0_emit_vm_flush,
.emit_fence = vce_v4_0_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
.test_ib = amdgpu_vce_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.insert_end = vce_v4_0_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_vce_ring_begin_use,
.end_use = amdgpu_vce_ring_end_use,
.emit_wreg = vce_v4_0_emit_wreg,
.emit_reg_wait = vce_v4_0_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->vce.num_rings; i++) {
adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
adev->vce.ring[i].me = i;
}
DRM_INFO("VCE enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
.set = vce_v4_0_set_interrupt_state,
.process = vce_v4_0_process_interrupt,
};
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->vce.irq.num_types = 1;
adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
};
const struct amdgpu_ip_block_version vce_v4_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_VCE,
.major = 4,
.minor = 0,
.rev = 0,
.funcs = &vce_v4_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/vce_v4_0.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_2.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_NUM_SW_GFX_RINGS 2
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");
MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_2_ARCT 0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_3_ARCT 0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_4_ARCT 0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
TA_RAS_BLOCK__GFX_CPC_UCODE,
TA_RAS_BLOCK__GFX_DC_STATE_ME1,
TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
TA_RAS_BLOCK__GFX_DC_STATE_ME2,
TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
/* CPF*/
TA_RAS_BLOCK__GFX_CPF_INDEX_START,
TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
TA_RAS_BLOCK__GFX_CPF_TAG,
TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
/* CPG*/
TA_RAS_BLOCK__GFX_CPG_INDEX_START,
TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
TA_RAS_BLOCK__GFX_CPG_TAG,
TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
/* GDS*/
TA_RAS_BLOCK__GFX_GDS_INDEX_START,
TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
/* SPI*/
TA_RAS_BLOCK__GFX_SPI_SR_MEM,
/* SQ*/
TA_RAS_BLOCK__GFX_SQ_INDEX_START,
TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
TA_RAS_BLOCK__GFX_SQ_LDS_D,
TA_RAS_BLOCK__GFX_SQ_LDS_I,
TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
/* SQC (3 ranges)*/
TA_RAS_BLOCK__GFX_SQC_INDEX_START,
/* SQC range 0*/
TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
/* SQC range 1*/
TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
/* SQC range 2*/
TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
/* TA*/
TA_RAS_BLOCK__GFX_TA_INDEX_START,
TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
/* TCA*/
TA_RAS_BLOCK__GFX_TCA_INDEX_START,
TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
/* TCC (5 sub-ranges)*/
TA_RAS_BLOCK__GFX_TCC_INDEX_START,
/* TCC range 0*/
TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
/* TCC range 1*/
TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
/* TCC range 2*/
TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
/* TCC range 3*/
TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
/* TCC range 4*/
TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
/* TCI*/
TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
/* TCP*/
TA_RAS_BLOCK__GFX_TCP_INDEX_START,
TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
TA_RAS_BLOCK__GFX_TCP_DB_RAM,
TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
/* TD*/
TA_RAS_BLOCK__GFX_TD_INDEX_START,
TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
TA_RAS_BLOCK__GFX_TD_CS_FIFO,
TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
/* EA (3 sub-ranges)*/
TA_RAS_BLOCK__GFX_EA_INDEX_START,
/* EA range 0*/
TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
/* EA range 1*/
TA_RAS_BLOCK__GFX_EA_INDEX1_START,
TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
/* EA range 2*/
TA_RAS_BLOCK__GFX_EA_INDEX2_START,
TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
/* UTC VM L2 bank*/
TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
/* UTC VM walker*/
TA_RAS_BLOCK__UTC_VML2_WALKER,
/* UTC ATC L2 2MB cache*/
TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
/* UTC ATC L2 4KB cache*/
TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
TA_RAS_BLOCK__GFX_MAX
};
struct ras_gfx_subblock {
unsigned char *name;
int ta_subblock;
int hw_supported_error_type;
int sw_supported_error_type;
};
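/*
 * AMDGPU_RAS_SUB_BLOCK() builds one ras_gfx_subblocks[] entry. As the
 * initializer shows, arguments a-d are packed into hw_supported_error_type
 * as bits 0-3 and e-h into sw_supported_error_type as bits 1, 3, 0 and 2;
 * the individual bits are assumed to map to the RAS error types (parity,
 * correctable, uncorrectable, poison) each subblock supports.
 */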
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
[AMDGPU_RAS_BLOCK__##subblock] = { \
#subblock, \
TA_RAS_BLOCK__##subblock, \
((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
}
static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
1),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
1),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
1),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
1),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};
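/*
 * Golden register settings: each SOC15_REG_GOLDEN_VALUE() entry names a
 * register, the mask of bits to override and the value to program into
 * those bits; soc15_program_register_sequence() applies the whole table
 * from gfx_v9_0_init_golden_registers().
 */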
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};
static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};
static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if, uint32_t instance_mask);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid);
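/*
 * KIQ (kernel interface queue) PM4 helpers: the functions below emit the
 * packets the driver sends through the KIQ to set resources, map/unmap
 * compute queues, query queue status and invalidate TLBs.
 */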
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
{
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
amdgpu_ring_write(kiq_ring,
PACKET3_SET_RESOURCES_VMID_MASK(0) |
/* vmid_mask:0 queue_type:0 (KIQ) */
PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
amdgpu_ring_write(kiq_ring,
lower_32_bits(queue_mask)); /* queue mask lo */
amdgpu_ring_write(kiq_ring,
upper_32_bits(queue_mask)); /* queue mask hi */
amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
amdgpu_ring_write(kiq_ring, 0); /* oac mask */
amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}
static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_ring *ring)
{
uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
uint64_t wptr_addr = ring->wptr_gpu_addr;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
/*queue_type: normal compute queue */
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
/* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
/* num_queues: must be 1 */
PACKET3_MAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring,
PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_ring *ring,
enum amdgpu_unmap_queues_action action,
u64 gpu_addr, u64 seq)
{
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_UNMAP_QUEUES_ACTION(action) |
PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring,
PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
if (action == PREEMPT_QUEUES_NO_UNMAP) {
amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
} else {
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
}
static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
struct amdgpu_ring *ring,
u64 addr,
u64 seq)
{
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
amdgpu_ring_write(kiq_ring,
PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
PACKET3_QUERY_STATUS_COMMAND(2));
/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
amdgpu_ring_write(kiq_ring,
PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub)
{
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
amdgpu_ring_write(kiq_ring,
PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
PACKET3_INVALIDATE_TLBS_PASID(pasid) |
PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
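/*
 * The *_size fields give the length, in dwords, of the packet each helper
 * above emits (header included); the common KIQ code uses them to reserve
 * ring space before invoking these callbacks.
 */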
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
.kiq_set_resources = gfx_v9_0_kiq_set_resources,
.kiq_map_queues = gfx_v9_0_kiq_map_queues,
.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
.kiq_query_status = gfx_v9_0_kiq_query_status,
.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
.set_resources_size = 8,
.map_queues_size = 7,
.unmap_queues_size = 6,
.query_status_size = 7,
.invalidate_tlbs_size = 2,
};
static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
soc15_program_register_sequence(adev,
golden_settings_gc_9_0_vg10,
ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
case IP_VERSION(9, 2, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_2_1,
ARRAY_SIZE(golden_settings_gc_9_2_1));
soc15_program_register_sequence(adev,
golden_settings_gc_9_2_1_vg12,
ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break;
case IP_VERSION(9, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
soc15_program_register_sequence(adev,
golden_settings_gc_9_0_vg20,
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
break;
case IP_VERSION(9, 4, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_4_1_arct,
ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
break;
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv2,
ARRAY_SIZE(golden_settings_gc_9_1_rv2));
else
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv1,
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
case IP_VERSION(9, 3, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rn,
ARRAY_SIZE(golden_settings_gc_9_1_rn));
return; /* for renoir, don't need common golden settings */
case IP_VERSION(9, 4, 2):
gfx_v9_4_2_init_golden_registers(adev,
adev->smuio.funcs->get_die_id(adev));
break;
default:
break;
}
if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)))
soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
bool wc, uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
WRITE_DATA_DST_SEL(0) |
(wc ? WR_CONFIRM : 0));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val);
}
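/*
 * Emit a WAIT_REG_MEM packet: stall until the register (mem_space == 0) or
 * the memory dword at addr0/addr1 (mem_space == 1), masked with 'mask',
 * equals 'ref'; 'inv' is the poll interval in the packet's last dword.
 */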
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
int mem_space, int opt, uint32_t addr0,
uint32_t addr1, uint32_t ref, uint32_t mask,
uint32_t inv)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring,
/* memory (1) or register (0) */
(WAIT_REG_MEM_MEM_SPACE(mem_space) |
WAIT_REG_MEM_OPERATION(opt) | /* wait */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(eng_sel)));
if (mem_space)
BUG_ON(addr0 & 0x3); /* Dword align */
amdgpu_ring_write(ring, addr0);
amdgpu_ring_write(ring, addr1);
amdgpu_ring_write(ring, ref);
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring, inv); /* poll interval */
}
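/*
 * Basic ring test: seed SCRATCH_REG0 with 0xCAFEDEAD, ask the CP to write
 * 0xDEADBEEF to it via SET_UCONFIG_REG, then poll until the value shows up
 * or the timeout expires.
 */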
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
uint32_t tmp = 0;
unsigned i;
int r;
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
return r;
}
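/*
 * Indirect buffer test: point an IB at a writeback slot, have it WRITE_DATA
 * 0xDEADBEEF there, then wait on the fence and check the slot contents.
 */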
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
uint32_t tmp;
long r;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
ib.ptr[3] = upper_32_bits(gpu_addr);
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
goto err2;
}
tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
amdgpu_device_wb_free(adev, index);
return r;
}
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
amdgpu_ucode_release(&adev->gfx.pfp_fw);
amdgpu_ucode_release(&adev->gfx.me_fw);
amdgpu_ucode_release(&adev->gfx.ce_fw);
amdgpu_ucode_release(&adev->gfx.rlc_fw);
amdgpu_ucode_release(&adev->gfx.mec_fw);
amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
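/*
 * Check whether the loaded ME/PFP/MEC firmware is recent enough to support
 * the combined register write-then-wait packet; when it is not, the driver
 * is expected to fall back to separate write and wait packets.
 */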
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
(adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
(adev->gfx.pfp_feature_version >= 42))
adev->gfx.me_fw_write_wait = true;
if ((adev->gfx.mec_fw_version >= 0x00000193) &&
(adev->gfx.mec_feature_version >= 42))
adev->gfx.mec_fw_write_wait = true;
break;
case IP_VERSION(9, 2, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
(adev->gfx.pfp_feature_version >= 44))
adev->gfx.me_fw_write_wait = true;
if ((adev->gfx.mec_fw_version >= 0x00000196) &&
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
case IP_VERSION(9, 4, 0):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
(adev->gfx.pfp_feature_version >= 44))
adev->gfx.me_fw_write_wait = true;
if ((adev->gfx.mec_fw_version >= 0x00000197) &&
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
(adev->gfx.pfp_feature_version >= 42))
adev->gfx.me_fw_write_wait = true;
if ((adev->gfx.mec_fw_version >= 0x00000192) &&
(adev->gfx.mec_feature_version >= 42))
adev->gfx.mec_fw_write_wait = true;
break;
default:
adev->gfx.me_fw_write_wait = true;
adev->gfx.mec_fw_write_wait = true;
break;
}
}
struct amdgpu_gfxoff_quirk {
u16 chip_vendor;
u16 chip_device;
u16 subsys_vendor;
u16 subsys_device;
u8 revision;
};
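/*
 * Boards on which GFXOFF is known to be problematic, matched by PCI
 * vendor/device, subsystem IDs and revision; the list is terminated by an
 * all-zero entry.
 */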
static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
/* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
/* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
{ 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
{ 0, 0, 0, 0, 0 },
};
static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
{
const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
while (p && p->chip_device != 0) {
if (pdev->vendor == p->chip_vendor &&
pdev->device == p->chip_device &&
pdev->subsystem_vendor == p->subsys_vendor &&
pdev->subsystem_device == p->subsys_device &&
pdev->revision == p->revision) {
return true;
}
++p;
}
return false;
}
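/*
 * Raven parts running SMC firmware 0x41e2b or newer are assumed to be the
 * re-spun "kicker" variant, which ships its own RLC firmware (see
 * gfx_v9_0_init_rlc_microcode()) and is exempt from the RLC-version based
 * GFXOFF disable in gfx_v9_0_check_if_need_gfxoff().
 */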
static bool is_raven_kicker(struct amdgpu_device *adev)
{
if (adev->pm.fw_version >= 0x41e2b)
return true;
else
return false;
}
static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
{
if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) &&
(adev->gfx.me_fw_version >= 0x000000a5) &&
(adev->gfx.me_feature_version >= 52))
return true;
else
return false;
}
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
break;
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
(adev->apu_flags & AMD_APU_IS_PICASSO)) &&
((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_feature_version < 1) ||
!adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
break;
case IP_VERSION(9, 3, 0):
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
break;
default:
break;
}
}
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
char *chip_name)
{
char fw_name[30];
int err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
amdgpu_ucode_release(&adev->gfx.pfp_fw);
amdgpu_ucode_release(&adev->gfx.me_fw);
amdgpu_ucode_release(&adev->gfx.ce_fw);
}
return err;
}
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
char *chip_name)
{
char fw_name[30];
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
uint16_t version_minor;
uint32_t smu_version;
/*
 * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
 * instead of picasso_rlc.bin.
 * How to tell them apart:
 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
 * or revision >= 0xD8 && revision <= 0xDF
 * otherwise it is PCO FP5
 */
if (!strcmp(chip_name, "picasso") &&
(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
(smu_version >= 0x41e2b))
/*
 * SMC is loaded by the SBIOS on APUs, so the SMU version can be
 * queried directly.
 */
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
if (err)
amdgpu_ucode_release(&adev->gfx.rlc_fw);
return err;
}
static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
{
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0))
return false;
return true;
}
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
char *chip_name)
{
char fw_name[30];
int err;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
/* ignore failures to load */
err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
} else {
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
out:
if (err)
amdgpu_ucode_release(&adev->gfx.mec_fw);
return err;
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
char ucode_prefix[30];
int r;
DRM_DEBUG("\n");
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
/* No CPG in Arcturus */
if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
if (r)
return r;
}
r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
return r;
}
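/*
 * Clear state buffer (CSB) helpers: compute the size of, and then fill, the
 * PM4 stream used when a CLEAR_STATE packet executes to reset context
 * registers to their clear-state defaults.
 */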
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
u32 count = 0;
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
/* begin clear state */
count += 2;
/* context control state */
count += 3;
for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT)
count += 2 + ext->reg_count;
else
return 0;
}
}
/* end clear state */
count += 2;
/* clear state */
count += 2;
return count;
}
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
volatile u32 *buffer)
{
u32 count = 0, i;
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
if (adev->gfx.rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
buffer[count++] = cpu_to_le32(0x80000000);
buffer[count++] = cpu_to_le32(0x80000000);
for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
buffer[count++] =
cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
buffer[count++] = cpu_to_le32(ext->reg_index -
PACKET3_SET_CONTEXT_REG_START);
for (i = 0; i < ext->reg_count; i++)
buffer[count++] = cpu_to_le32(ext->extent[i]);
} else {
return;
}
}
}
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
buffer[count++] = cpu_to_le32(0);
}
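/*
 * Program the per-SE/SH "always on" CU masks: APUs keep 4 CUs powered,
 * GC 9.2.1 keeps 8 and other parts keep 12; the bitmap covering the first
 * two active CUs is also recorded in RLC_PG_ALWAYS_ON_CU_MASK.
 */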
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
uint32_t pg_always_on_cu_num = 2;
uint32_t always_on_cu_num;
uint32_t i, j, k;
uint32_t mask, cu_bitmap, counter;
if (adev->flags & AMD_IS_APU)
always_on_cu_num = 4;
else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1))
always_on_cu_num = 8;
else
always_on_cu_num = 12;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
cu_bitmap = 0;
counter = 0;
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (cu_info->bitmap[0][i][j] & mask) {
if (counter == pg_always_on_cu_num)
WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
if (counter < always_on_cu_num)
cu_bitmap |= mask;
else
break;
counter++;
}
mask <<= 1;
}
WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
}
}
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
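/*
 * LBPW (load balancing per watt) setup: program the RLC load-balancing
 * thresholds, counters and sampling parameters, then derive the always-on
 * CU mask via gfx_v9_0_init_always_on_cu_mask().
 */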
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
uint32_t data;
/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
data &= 0x0000FFFF;
data |= 0x00C00000;
WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
/*
* RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
* programmed in gfx_v9_0_init_always_on_cu_mask()
*/
/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
 * but is used here as part of the RLC_LB_CNTL configuration */
data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_0_init_always_on_cu_mask(adev);
}
static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
uint32_t data;
/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
data &= 0x0000FFFF;
data |= 0x00C00000;
WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
/*
* RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
* programmed in gfx_v9_0_init_always_on_cu_mask()
*/
/* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
 * but is used here as part of the RLC_LB_CNTL configuration */
data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_0_init_always_on_cu_mask(adev);
}
static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
if (gfx_v9_0_load_mec2_fw_bin_support(adev))
return 5;
else
return 4;
}
static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
adev->gfx.rlc.rlcg_reg_access_supported = true;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
const struct cs_section_def *cs_data;
int r;
adev->gfx.rlc.cs_data = gfx9_cs_data;
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
/* init clear state block */
r = amdgpu_gfx_rlc_init_csb(adev);
if (r)
return r;
}
if (adev->flags & AMD_IS_APU) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
r = amdgpu_gfx_rlc_init_cpt(adev);
if (r)
return r;
}
return 0;
}
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
int r;
u32 *hpd;
const __le32 *fw_data;
unsigned fw_size;
u32 *fw;
size_t mec_hpd_size;
const struct gfx_firmware_header_v1_0 *mec_hdr;
bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
if (r) {
dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
}
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_obj,
&adev->gfx.mec.mec_fw_gpu_addr,
(void **)&fw);
if (r) {
dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
memcpy(fw, fw_data, fw_size);
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
return 0;
}
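/*
 * Indirect SQ wave-state reads: SQ_IND_INDEX selects the SIMD, wave and
 * register (with FORCE_READ and optional AUTO_INCR), and SQ_IND_DATA
 * returns the value(s); used by the wave and GPR readers below.
 */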
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK));
return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK) |
(SQ_IND_INDEX__AUTO_INCR_MASK));
while (num--)
*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
/* type 1 wave data */
dst[(*no_fields)++] = 1;
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t start,
uint32_t size, uint32_t *dst)
{
wave_read_regs(
adev, simd, wave, 0,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t start, uint32_t size,
uint32_t *dst)
{
wave_read_regs(
adev, simd, wave, thread,
start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}
static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
soc15_grbm_select(adev, me, pipe, q, vm, 0);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
};
const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
.ras_error_inject = &gfx_v9_0_ras_error_inject,
.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
static struct amdgpu_gfx_ras gfx_v9_0_ras = {
.ras_block = {
.hw_ops = &gfx_v9_0_ras_ops,
},
};
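/*
 * Per-ASIC gfx configuration: set the SC fifo sizes, pick (or read back
 * and patch) GB_ADDR_CONFIG, then decode its fields into
 * gb_addr_config_fields.
 */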
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
int err;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
break;
case IP_VERSION(9, 2, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
DRM_INFO("fix gfx.config for vega12\n");
break;
case IP_VERSION(9, 4, 0):
adev->gfx.ras = &gfx_v9_0_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
/* check vbios table if gpu info is not available */
err = amdgpu_atomfirmware_get_gfx_info(adev);
if (err)
return err;
break;
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
case IP_VERSION(9, 4, 1):
adev->gfx.ras = &gfx_v9_4_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
break;
case IP_VERSION(9, 3, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22010042;
break;
case IP_VERSION(9, 4, 2):
adev->gfx.ras = &gfx_v9_4_2_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
/* check vbios table if gpu info is not available */
err = amdgpu_atomfirmware_get_gfx_info(adev);
if (err)
return err;
break;
default:
BUG();
break;
}
adev->gfx.config.gb_addr_config = gb_addr_config;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_PIPES);
adev->gfx.config.max_tile_pipes =
adev->gfx.config.gb_addr_config_fields.num_pipes;
adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_BANKS);
adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
MAX_COMPRESSED_FRAGS);
adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_RB_PER_SE);
adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
NUM_SHADER_ENGINES);
adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
REG_GET_FIELD(
adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG,
PIPE_INTERLEAVE_SIZE));
return 0;
}
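/*
 * Set up one compute (MEC) ring: map ring_id onto me/pipe/queue, assign
 * its doorbell and EOP buffer slice, derive the EOP interrupt source and
 * hardware priority, then register the ring with amdgpu_ring_init().
 */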
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int mec, int pipe, int queue)
{
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
unsigned int hw_prio;
/* mec0 is me1 */
ring->me = mec + 1;
ring->pipe = pipe;
ring->queue = queue;
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
ring->vm_hub = AMDGPU_GFXHUB(0);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
hw_prio, NULL);
}
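/*
 * Software init: register the CP interrupt sources, allocate the RLC,
 * MEC, KIQ and MQD objects, and create the gfx, software-muxed and
 * compute rings before reading back the gfx configuration.
 */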
static int gfx_v9_0_sw_init(void *handle)
{
int i, j, k, r, ring_id;
struct amdgpu_ring *ring;
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
unsigned int hw_prio;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 2):
adev->gfx.mec.num_mec = 2;
break;
default:
adev->gfx.mec.num_mec = 1;
break;
}
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
/* EOP Event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
if (r)
return r;
/* Privileged reg */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
&adev->gfx.priv_reg_irq);
if (r)
return r;
/* Privileged inst */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
&adev->gfx.priv_inst_irq);
if (r)
return r;
/* ECC error */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;
/* FUE error */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
&adev->gfx.cp_ecc_error_irq);
if (r)
return r;
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
if (adev->gfx.rlc.funcs) {
if (adev->gfx.rlc.funcs->init) {
r = adev->gfx.rlc.funcs->init(adev);
if (r) {
dev_err(adev->dev, "Failed to init rlc BOs!\n");
return r;
}
}
}
r = gfx_v9_0_mec_init(adev);
if (r) {
DRM_ERROR("Failed to init MEC BOs!\n");
return r;
}
/* set up the gfx ring */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
if (!i)
sprintf(ring->name, "gfx");
else
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
/* disable scheduler on the real ring */
ring->no_scheduler = true;
ring->vm_hub = AMDGPU_GFXHUB(0);
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
/* set up the software rings */
if (adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
ring = &adev->gfx.sw_gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, amdgpu_sw_ring_name(i));
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
ring->is_sw_ring = true;
hw_prio = amdgpu_sw_ring_priority(i);
ring->vm_hub = AMDGPU_GFXHUB(0);
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
NULL);
if (r)
return r;
ring->wptr = 0;
}
/* init the muxer and add software rings */
r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
GFX9_NUM_SW_GFX_RINGS);
if (r) {
DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
return r;
}
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
&adev->gfx.sw_gfx_ring[i]);
if (r) {
DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
return r;
}
}
}
/* set up the compute queues - allocate horizontally across pipes */
ring_id = 0;
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
k, j))
continue;
r = gfx_v9_0_compute_ring_init(adev,
ring_id,
i, k, j);
if (r)
return r;
ring_id++;
}
}
}
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
kiq = &adev->gfx.kiq[0];
r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
if (r)
return r;
adev->gfx.ce_ram_size = 0x8000;
r = gfx_v9_0_gpu_early_init(adev);
if (r)
return r;
if (amdgpu_gfx_ras_sw_init(adev)) {
dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
return -EINVAL;
}
return 0;
}
static int gfx_v9_0_sw_fini(void *handle)
{
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
amdgpu_ring_mux_fini(&adev->gfx.muxer);
}
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
amdgpu_gfx_mqd_sw_fini(adev, 0);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
amdgpu_gfx_kiq_fini(adev, 0);
gfx_v9_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
if (adev->flags & AMD_IS_APU) {
amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
gfx_v9_0_free_microcode(adev);
return 0;
}
static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
/* TODO */
}
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
u32 instance, int xcc_id)
{
u32 data;
if (instance == 0xffffffff)
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
if (sh_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se);
return (~data) & mask;
}
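/*
 * Walk every SE/SH, collect which render backends are active and cache
 * the combined bitmap and RB count in the gfx configuration.
 */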
static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v9_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
}
static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
{
uint32_t data;
uint32_t trap_config_vmid_mask = 0;
int i;
/* Calculate trap config vmid mask */
for (i = first_vmid; i < last_vmid; i++)
trap_config_vmid_mask |= (1 << i);
data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
VMID_SEL, trap_config_vmid_mask);
data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
TRAP_EN, 1);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
}
#define DEFAULT_SH_MEM_BASES (0x6000)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
uint32_t sh_mem_config;
uint32_t sh_mem_bases;
/*
* Configure apertures:
* LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
* Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
* GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
*/
sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
mutex_lock(&adev->srbm_mutex);
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
soc15_grbm_select(adev, 0, 0, 0, i, 0);
/* CP and shaders */
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
}
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
}
}
static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
{
int vmid;
/*
* Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
* access. Compute VMIDs should be enabled by FW for target VMIDs,
* the driver can enable them for graphics. VMID0 should maintain
* access so that HWS firmware can save/restore entries.
*/
for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
}
}
static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
{
uint32_t tmp;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
!READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
default:
break;
}
}
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
int i;
WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v9_0_tiling_mode_table_init(adev);
if (adev->gfx.num_gfx_rings)
gfx_v9_0_setup_rb(adev);
gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
soc15_grbm_select(adev, 0, 0, 0, i, 0);
/* CP and shaders */
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
!!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
} else {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
SH_MEM_ALIGNMENT_MODE_UNALIGNED);
tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
!!adev->gmc.noretry);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
(adev->gmc.private_aperture_start >> 48));
tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
(adev->gmc.shared_aperture_start >> 48));
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
}
}
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
gfx_v9_0_init_compute_vmid(adev);
gfx_v9_0_init_gds_vmid(adev);
gfx_v9_0_init_sq_config(adev);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
u32 i, j, k;
u32 mask;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
udelay(1);
}
if (k == adev->usec_timeout) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff,
0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
return;
}
}
}
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
for (k = 0; k < adev->usec_timeout; k++) {
if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
break;
udelay(1);
}
}
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
u32 tmp;
/* These interrupts should be enabled to drive DS clock */
tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
if (adev->gfx.num_gfx_rings)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
adev->gfx.rlc.clear_state_size);
}
static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
int indirect_offset,
int list_size,
int *unique_indirect_regs,
int unique_indirect_reg_count,
int *indirect_start_offsets,
int *indirect_start_offsets_count,
int max_start_offsets_count)
{
int idx;
for (; indirect_offset < list_size; indirect_offset++) {
WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
indirect_offset += 2;
/* look for the matching index */
for (idx = 0; idx < unique_indirect_reg_count; idx++) {
if (unique_indirect_regs[idx] ==
register_list_format[indirect_offset] ||
!unique_indirect_regs[idx])
break;
}
BUG_ON(idx >= unique_indirect_reg_count);
if (!unique_indirect_regs[idx])
unique_indirect_regs[idx] = register_list_format[indirect_offset];
indirect_offset++;
}
}
}
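/*
 * Program the RLC save/restore list: copy the register_restore table into
 * SRM ARAM, load the direct and indirect register list format into GPM
 * scratch, then publish the list size, start offsets and the unique
 * indirect registers.
 */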
static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
{
int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
int unique_indirect_reg_count = 0;
int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
int indirect_start_offsets_count = 0;
int list_size = 0;
int i = 0, j = 0;
u32 tmp = 0;
u32 *register_list_format =
kmemdup(adev->gfx.rlc.register_list_format,
adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
if (!register_list_format)
return -ENOMEM;
/* setup unique_indirect_regs array and indirect_start_offsets array */
unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
gfx_v9_1_parse_ind_reg_list(register_list_format,
adev->gfx.rlc.reg_list_format_direct_reg_list_length,
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indirect_regs,
unique_indirect_reg_count,
indirect_start_offsets,
&indirect_start_offsets_count,
ARRAY_SIZE(indirect_start_offsets));
/* enable auto inc in case it is disabled */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
/* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
adev->gfx.rlc.register_restore[i]);
/* load indirect register */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.reg_list_format_start);
/* direct register portion */
for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
register_list_format[i]);
/* indirect register portion */
while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
if (register_list_format[i] == 0xFFFFFFFF) {
WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
continue;
}
WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
for (j = 0; j < unique_indirect_reg_count; j++) {
if (register_list_format[i] == unique_indirect_regs[j]) {
WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
break;
}
}
BUG_ON(j >= unique_indirect_reg_count);
i++;
}
/* set save/restore list size */
list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
list_size = list_size >> 1;
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.reg_restore_list_size);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
/* write the starting offsets to RLC scratch ram */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.starting_offsets_start);
for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
indirect_start_offsets[i]);
/* load unique indirect regs*/
for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
if (unique_indirect_regs[i] != 0) {
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
+ GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
unique_indirect_regs[i] & 0x3FFFF);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
+ GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
unique_indirect_regs[i] >> 20);
}
}
kfree(register_list_format);
return 0;
}
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
bool enable)
{
uint32_t data = 0;
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
if (enable) {
/* enable GFXIP control over CGPG */
data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
if (default_data != data)
WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
/* update status */
data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
} else {
/* restore GFXIP control over CGPG */
data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
if (default_data != data)
WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
}
}
static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG)) {
/* init IDLE_POLL_COUNT = 60 */
data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
/* init RLC PG Delay */
data = 0;
data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0))
pwr_10_0_gfxip_control_over_cgpg(adev, true);
}
}
static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
bool enable)
{
uint32_t data = 0;
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
data = REG_SET_FIELD(data, RLC_PG_CNTL,
SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
bool enable)
{
uint32_t data = 0;
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
data = REG_SET_FIELD(data, RLC_PG_CNTL,
SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data = 0;
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
data = REG_SET_FIELD(data, RLC_PG_CNTL,
CP_PG_DISABLE,
enable ? 0 : 1);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
data = REG_SET_FIELD(data, RLC_PG_CNTL,
GFX_POWER_GATING_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
data = REG_SET_FIELD(data, RLC_PG_CNTL,
GFX_PIPELINE_PG_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
if (!enable)
/* read any GFX register to wake up GFX */
data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
}
static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
data = REG_SET_FIELD(data, RLC_PG_CNTL,
STATIC_PER_CU_PG_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
data = REG_SET_FIELD(data, RLC_PG_CNTL,
DYN_PER_CU_PG_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
gfx_v9_0_init_csb(adev);
/*
* Rlc save restore list is workable since v2_1.
* And it's needed by gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) ||
(adev->apu_flags & AMD_APU_IS_RAVEN2))
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
}
}
static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
gfx_v9_0_wait_for_rlc_serdes(adev);
}
static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
udelay(50);
/* APUs such as carrizo enable the cp interrupt after the cp is initialized */
if (!(adev->flags & AMD_IS_APU)) {
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
udelay(50);
}
#ifdef AMDGPU_RLC_DEBUG_RETRY
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
if (rlc_ucode_ver == 0x108) {
DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
rlc_ucode_ver, adev->gfx.rlc_fw_version);
/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
* default is 0x9C4 to create a 100us interval */
WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
* to disable the page fault retry interrupts, default is
* 0x100 (256) */
WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
}
#endif
}
static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
{
const struct rlc_firmware_header_v2_0 *hdr;
const __le32 *fw_data;
unsigned i, fw_size;
if (!adev->gfx.rlc_fw)
return -EINVAL;
hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
amdgpu_ucode_print_rlc_hdr(&hdr->header);
fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
RLCG_UCODE_LOADING_START_ADDRESS);
for (i = 0; i < fw_size; i++)
WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
return 0;
}
static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_init_csb(adev);
return 0;
}
adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
gfx_v9_0_init_pg(adev);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
/* legacy rlc firmware loading */
r = gfx_v9_0_rlc_load_microcode(adev);
if (r)
return r;
}
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
gfx_v9_0_init_lbpw(adev);
if (amdgpu_lbpw == 0)
gfx_v9_0_enable_lbpw(adev, false);
else
gfx_v9_0_enable_lbpw(adev, true);
break;
case IP_VERSION(9, 4, 0):
gfx_v9_4_init_lbpw(adev);
if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
gfx_v9_0_enable_lbpw(adev, false);
break;
default:
break;
}
gfx_v9_0_update_spm_vmid_internal(adev, 0xf);
adev->gfx.rlc.funcs->start(adev);
return 0;
}
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
}
static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
const struct gfx_firmware_header_v1_0 *pfp_hdr;
const struct gfx_firmware_header_v1_0 *ce_hdr;
const struct gfx_firmware_header_v1_0 *me_hdr;
const __le32 *fw_data;
unsigned i, fw_size;
if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
return -EINVAL;
pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
adev->gfx.pfp_fw->data;
ce_hdr = (const struct gfx_firmware_header_v1_0 *)
adev->gfx.ce_fw->data;
me_hdr = (const struct gfx_firmware_header_v1_0 *)
adev->gfx.me_fw->data;
amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
gfx_v9_0_cp_gfx_enable(adev, false);
/* PFP */
fw_data = (const __le32 *)
(adev->gfx.pfp_fw->data +
le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
/* CE */
fw_data = (const __le32 *)
(adev->gfx.ce_fw->data +
le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
/* ME */
fw_data = (const __le32 *)
(adev->gfx.me_fw->data +
le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
return 0;
}
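/*
 * Bring up the gfx CP: program CP_MAX_CONTEXT/CP_DEVICE_ID, un-halt the
 * CP and emit the clear-state preamble (context registers from
 * gfx9_cs_data) on the first gfx ring.
 */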
static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
int r, i, tmp;
/* init the CP */
WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
gfx_v9_0_cp_gfx_enable(adev, true);
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, 0x80000000);
amdgpu_ring_write(ring, 0x80000000);
for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
amdgpu_ring_write(ring,
PACKET3(PACKET3_SET_CONTEXT_REG,
ext->reg_count));
amdgpu_ring_write(ring,
ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
for (i = 0; i < ext->reg_count; i++)
amdgpu_ring_write(ring, ext->extent[i]);
}
}
}
amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
(SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0);
amdgpu_ring_commit(ring);
return 0;
}
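/*
 * Resume the gfx ring buffer: program the RB0 size, read/write pointers
 * and their writeback addresses, the ring base and doorbell range, then
 * kick the ring via gfx_v9_0_cp_gfx_start().
 */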
static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
/* Set the write pointer delay */
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
/* set the RB to use vmid 0 */
WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
/* Set ring buffer size */
ring = &adev->gfx.gfx_ring[0];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
/* Initialize the ring buffer's write pointers */
ring->wptr = 0;
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
/* set the wb address whether it's enabled or not */
rptr_addr = ring->rptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
wptr_gpu_addr = ring->wptr_gpu_addr;
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
mdelay(1);
WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
rb_addr = ring->gpu_addr >> 8;
WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
if (ring->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_EN, 1);
} else {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
}
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER, ring->doorbell_index);
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
/* start the ring */
gfx_v9_0_cp_gfx_start(adev);
return 0;
}
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
if (enable) {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
const struct gfx_firmware_header_v1_0 *mec_hdr;
const __le32 *fw_data;
unsigned i;
u32 tmp;
if (!adev->gfx.mec_fw)
return -EINVAL;
gfx_v9_0_cp_compute_enable(adev, false);
mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
fw_data = (const __le32 *)
(adev->gfx.mec_fw->data +
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
tmp = 0;
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
/* MEC1 */
WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
mec_hdr->jt_offset);
for (i = 0; i < mec_hdr->jt_size; i++)
WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
adev->gfx.mec_fw_version);
/* TODO: Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
return 0;
}
/* KIQ functions */
static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
{
uint32_t tmp;
struct amdgpu_device *adev = ring->adev;
/* tell RLC which is KIQ queue */
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80;
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
struct amdgpu_device *adev = ring->adev;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
}
}
}
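/*
 * Fill the memory queue descriptor (MQD) for a compute/KIQ ring: EOP
 * buffer, doorbell, queue base, read/write pointer writeback addresses
 * and the HQD control values used when the queue is mapped.
 */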
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
mqd->header = 0xC0310800;
mqd->compute_pipelinestat_enable = 0x00000001;
mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
mqd->dynamic_cu_mask_addr_lo =
lower_32_bits(ring->mqd_gpu_addr
+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi =
upper_32_bits(ring->mqd_gpu_addr
+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
if (ring->use_doorbell) {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
} else {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
}
mqd->cp_hqd_pq_doorbell_control = tmp;
/* disable the queue if it's active */
ring->wptr = 0;
mqd->cp_hqd_dequeue_request = 0;
mqd->cp_hqd_pq_rptr = 0;
mqd->cp_hqd_pq_wptr_lo = 0;
mqd->cp_hqd_pq_wptr_hi = 0;
/* set the pointer to the MQD */
mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
/* set MQD vmid to 0 */
tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
mqd->cp_mqd_control = tmp;
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
hqd_gpu_addr = ring->gpu_addr >> 8;
mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
/* set up the HQD, this is similar to CP_RB0_CNTL */
tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
(order_base_2(ring->ring_size / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
mqd->cp_hqd_pq_control = tmp;
/* set the wb address whether it's enabled or not */
wb_gpu_addr = ring->rptr_gpu_addr;
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
upper_32_bits(wb_gpu_addr) & 0xffff;
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
wb_gpu_addr = ring->wptr_gpu_addr;
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
ring->wptr = 0;
mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
/* set the vmid for the queue */
mqd->cp_hqd_vmid = 0;
tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
mqd->cp_hqd_persistent_state = tmp;
/* set MIN_IB_AVAIL_SIZE */
tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
/* set static priority for a queue/ring */
gfx_v9_0_mqd_set_priority(ring, mqd);
mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
/* the map_queues packet doesn't need to activate the queue,
* so only the kiq needs to set this field.
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
mqd->cp_hqd_active = 1;
return 0;
}
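/*
 * Program the KIQ HQD registers directly from its MQD: disable wptr
 * polling, drain the queue if it is still active, write the MQD/HQD base
 * addresses and doorbell range, then activate the queue.
 */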
static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int j;
/* disable wptr polling */
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
mqd->cp_hqd_eop_base_addr_lo);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
mqd->cp_hqd_eop_base_addr_hi);
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
mqd->cp_hqd_eop_control);
/* enable doorbell? */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
mqd->cp_hqd_dequeue_request);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
mqd->cp_hqd_pq_rptr);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
mqd->cp_hqd_pq_wptr_lo);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
mqd->cp_hqd_pq_wptr_hi);
}
/* set the pointer to the MQD */
WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
mqd->cp_mqd_base_addr_lo);
WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
mqd->cp_mqd_base_addr_hi);
/* set MQD vmid to 0 */
WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
mqd->cp_mqd_control);
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
mqd->cp_hqd_pq_base_lo);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
mqd->cp_hqd_pq_base_hi);
/* set up the HQD, this is similar to CP_RB0_CNTL */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
mqd->cp_hqd_pq_control);
/* set the wb address whether it's enabled or not */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
mqd->cp_hqd_pq_rptr_report_addr_lo);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
mqd->cp_hqd_pq_rptr_report_addr_hi);
/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
mqd->cp_hqd_pq_wptr_poll_addr_lo);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
mqd->cp_hqd_pq_wptr_poll_addr_hi);
/* enable the doorbell if requested */
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
/* If GC has entered CGPG, ringing a doorbell beyond the first page
* doesn't wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to work
* around this issue. This change has to be aligned with the firmware
* update.
*/
if (check_if_enlarge_doorbell_range(adev))
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell.size - 4));
else
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
mqd->cp_hqd_pq_doorbell_control);
/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
mqd->cp_hqd_pq_wptr_lo);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
mqd->cp_hqd_pq_wptr_hi);
/* set the vmid for the queue */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
mqd->cp_hqd_persistent_state);
/* activate the queue */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
mqd->cp_hqd_active);
if (ring->use_doorbell)
WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int j;
/* disable the queue if it's active */
if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
if (j == AMDGPU_MAX_USEC_TIMEOUT) {
DRM_DEBUG("KIQ dequeue request failed.\n");
/* Manual disable if dequeue request times out */
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
}
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
0);
}
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
return 0;
}
static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
struct v9_mqd *tmp_mqd;
gfx_v9_0_kiq_setting(ring);
/* The GPU could be in a bad state during probe: the driver triggers the
* reset after loading the SMU, and in that case the MQD has not been
* initialized, so the driver needs to re-init it.
* Check mqd->cp_hqd_pq_control, since this value should not be 0.
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
/* for GPU_RESET case , reset MQD to a clean status */
if (adev->gfx.kiq[0].mqd_backup)
memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_kiq_init_register(ring);
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
if (amdgpu_sriov_vf(adev) && adev->in_suspend)
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_mqd_init(ring);
gfx_v9_0_kiq_init_register(ring);
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.kiq[0].mqd_backup)
memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
}
return 0;
}
static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
struct v9_mqd *tmp_mqd;
/* Same as the kiq init above: the driver needs to re-init the mqd if
* mqd->cp_hqd_pq_control has not been initialized before.
*/
tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
if (!tmp_mqd->cp_hqd_pq_control ||
(!amdgpu_in_reset(adev) && !adev->in_suspend)) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_mqd_init(ring);
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
} else {
/* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
return 0;
}
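/* Map the KIQ MQD BO, (re)initialize the KIQ queue, then unmap it again. */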
static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
int r;
ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(ring->mqd_obj);
return r;
}
gfx_v9_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
return 0;
}
static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = NULL;
int r = 0, i;
gfx_v9_0_cp_compute_enable(adev, true);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
r = gfx_v9_0_kcq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
amdgpu_bo_unreserve(ring->mqd_obj);
if (r)
goto done;
}
r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
}
static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
{
int r, i;
struct amdgpu_ring *ring;
if (!(adev->flags & AMD_IS_APU))
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
if (adev->gfx.num_gfx_rings) {
/* legacy firmware loading */
r = gfx_v9_0_cp_gfx_load_microcode(adev);
if (r)
return r;
}
r = gfx_v9_0_cp_compute_load_microcode(adev);
if (r)
return r;
}
r = gfx_v9_0_kiq_resume(adev);
if (r)
return r;
if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_cp_gfx_resume(adev);
if (r)
return r;
}
r = gfx_v9_0_kcq_resume(adev);
if (r)
return r;
if (adev->gfx.num_gfx_rings) {
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
amdgpu_ring_test_helper(ring);
}
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
return 0;
}
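/*
 * Mirror the DF hash settings (64K/2M/1G) into TCP_ADDR_CONFIG; only
 * needed on GC 9.4.1 and 9.4.2.
 */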
static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
{
u32 tmp;
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) &&
adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))
return;
tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
adev->df.hash_status.hash_64k);
tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
adev->df.hash_status.hash_2m);
tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
adev->df.hash_status.hash_1g);
WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
}
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
if (adev->gfx.num_gfx_rings)
gfx_v9_0_cp_gfx_enable(adev, enable);
gfx_v9_0_cp_compute_enable(adev, enable);
}
static int gfx_v9_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!amdgpu_sriov_vf(adev))
gfx_v9_0_init_golden_registers(adev);
gfx_v9_0_constants_init(adev);
gfx_v9_0_init_tcp_config(adev);
r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
r = gfx_v9_0_cp_resume(adev);
if (r)
return r;
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
gfx_v9_4_2_set_power_brake_sequence(adev);
return r;
}
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
/* disable KCQ to avoid CPC touching memory that is no longer valid */
amdgpu_gfx_disable_kcq(adev, 0);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
/* Polling must be disabled for SRIOV once the hw is finished; otherwise
* the CPC engine may keep fetching a WB address that is no longer valid
* after the sw has finished, and trigger a DMAR read error on the
* hypervisor side.
*/
WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
return 0;
}
/* Use the deinitialize sequence from CAIL when unbinding the device from
* the driver, otherwise KIQ hangs when binding it back.
*/
if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
adev->gfx.kiq[0].ring.pipe,
adev->gfx.kiq[0].ring.queue, 0, 0);
gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
gfx_v9_0_cp_enable(adev, false);
/* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
(adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
dev_dbg(adev->dev, "Skipping RLC halt\n");
return 0;
}
adev->gfx.rlc.funcs->stop(adev);
return 0;
}
static int gfx_v9_0_suspend(void *handle)
{
return gfx_v9_0_hw_fini(handle);
}
static int gfx_v9_0_resume(void *handle)
{
return gfx_v9_0_hw_init(handle);
}
static bool gfx_v9_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
return false;
else
return true;
}
static int gfx_v9_0_wait_for_idle(void *handle)
{
unsigned i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
if (gfx_v9_0_is_idle(handle))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int gfx_v9_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* GRBM_STATUS */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
}
if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
}
/* GRBM_STATUS2 */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
if (grbm_soft_reset) {
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
if (adev->gfx.num_gfx_rings)
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
if (grbm_soft_reset) {
tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
tmp |= grbm_soft_reset;
dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
udelay(50);
tmp &= ~grbm_soft_reset;
WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
}
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
{
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq, reg_val_offs = 0;
uint64_t value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
pr_err("critical bug! too many kiq readers\n");
goto failed_unlock;
}
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 9 | /* src: gpu clock count */
(5 << 8) | /* dst: memory */
(1 << 16) | /* count sel */
(1 << 20)); /* write confirm */
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
reg_val_offs * 4));
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
goto failed_undo;
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
/* don't wait any longer in the gpu reset case, because doing so may
* block the gpu_recover() routine forever: e.g. this virt_kiq_rreg
* is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
* never return if we keep waiting in virt_kiq_rreg, which causes
* gpu_recover() to hang there.
*
* likewise, don't wait when called from IRQ context
*/
if (r < 1 && (amdgpu_in_reset(adev)))
goto failed_kiq_read;
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
mb();
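/* the KIQ copied the 64-bit GPU clock as two dwords into the writeback slot */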
value = (uint64_t)adev->wb.wb[reg_val_offs] |
(uint64_t)adev->wb.wb[reg_val_offs + 1 ] << 32ULL;
amdgpu_device_wb_free(adev, reg_val_offs);
return value;
failed_undo:
amdgpu_ring_undo(ring);
failed_unlock:
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
if (reg_val_offs)
amdgpu_device_wb_free(adev, reg_val_offs);
pr_err("failed to read gpu clock\n");
return ~0;
}
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock, clock_lo, clock_hi, hi_check;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 3, 0):
preempt_disable();
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
/* The SMUIO TSC clock runs at 100 MHz, so the lower 32 bits carry over
* into the upper half roughly every 42 seconds (2^32 / 100 MHz ~= 42.9 s).
*/
if (hi_check != clock_hi) {
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
clock_hi = hi_check;
}
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
default:
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
clock = gfx_v9_0_kiq_read_clock(adev);
} else {
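/* latch the GPU clock into the RLC counter registers, then read back LSB/MSB */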
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
break;
}
return clock;
}
static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
struct amdgpu_device *adev = ring->adev;
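/* the per-VMID GDS base/size registers are spaced two dwords apart,
* hence the 2 * vmid register offset below
*/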
/* GDS Base */
gfx_v9_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
gds_base);
/* GDS Size */
gfx_v9_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
gds_size);
/* GWS */
gfx_v9_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
/* OA */
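/* the value below is a mask of oa_size consecutive bits starting at bit oa_base */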
gfx_v9_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
static const u32 vgpr_init_compute_shader[] =
{
0xb07c0000, 0xbe8000ff,
0x000000f8, 0xbf110800,
0x7e000280, 0x7e020280,
0x7e040280, 0x7e060280,
0x7e080280, 0x7e0a0280,
0x7e0c0280, 0x7e0e0280,
0x80808800, 0xbe803200,
0xbf84fff5, 0xbf9c0000,
0xd28c0001, 0x0001007f,
0xd28d0001, 0x0002027e,
0x10020288, 0xb8810904,
0xb7814000, 0xd1196a01,
0x00000301, 0xbe800087,
0xbefc00c1, 0xd89c4000,
0x00020201, 0xd89cc080,
0x00040401, 0x320202ff,
0x00000800, 0x80808100,
0xbf84fff8, 0x7e020280,
0xbf810000, 0x00000000,
};
static const u32 sgpr_init_compute_shader[] =
{
0xb07c0000, 0xbe8000ff,
0x0000005f, 0xbee50080,
0xbe812c65, 0xbe822c65,
0xbe832c65, 0xbe842c65,
0xbe852c65, 0xb77c0005,
0x80808500, 0xbf84fff8,
0xbe800080, 0xbf810000,
};
static const u32 vgpr_init_compute_shader_arcturus[] = {
0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
0xbf84fff8, 0xbf810000,
};
/* When the register arrays below are changed, please update gpr_reg_size
* and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds()
* so that all gfx9 ASICs stay covered */
static const struct soc15_reg_entry vgpr_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
static const struct soc15_reg_entry sgpr1_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
};
static const struct soc15_reg_entry sgpr2_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
};
static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
{ SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
};
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
int i, r;
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
r = amdgpu_ring_alloc(ring, 7);
if (r) {
DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
ring->name, r);
return r;
}
WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
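/* CP DMA a constant fill of zero across the whole VMID0 GDS range, with RAW_WAIT */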
amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
PACKET3_DMA_DATA_DST_SEL(1) |
PACKET3_DMA_DATA_SRC_SEL(2) |
PACKET3_DMA_DATA_ENGINE(0)));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
adev->gds.gds_size);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
return r;
}
static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
int r, i;
unsigned total_size, vgpr_offset, sgpr_offset;
u64 gpu_addr;
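/* compute_dim_x is the total CU count (SEs * SHs per SE * CUs per SH);
* the dispatch sizes below are scaled from it
*/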
int compute_dim_x = adev->gfx.config.max_shader_engines *
adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se;
int sgpr_work_group_size = 5;
int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
int vgpr_init_shader_size;
const u32 *vgpr_init_shader_ptr;
const struct soc15_reg_entry *vgpr_init_regs_ptr;
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
/* bail if the compute ring is not ready */
if (!ring->sched.ready)
return 0;
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
} else {
vgpr_init_shader_ptr = vgpr_init_compute_shader;
vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
vgpr_init_regs_ptr = vgpr_init_regs;
}
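/* per dispatch stream: gpr_reg_size SET_SH_REG writes of 3 dwords each,
* 4 dwords for COMPUTE_PGM_LO/HI, 5 dwords for DISPATCH_DIRECT and
* 2 dwords for the CS partial flush, at 4 bytes per dword
*/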
total_size =
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
total_size +=
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
total_size += ALIGN(vgpr_init_shader_size, 256);
sgpr_offset = total_size;
total_size += sizeof(sgpr_init_compute_shader);
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, total_size,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
}
/* load the compute shaders */
for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
/* init the ib length to 0 */
ib.length_dw = 0;
/* VGPR */
/* write the register state for the compute dispatch */
for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
- PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
- PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* SGPR1 */
/* write the register state for the compute dispatch */
for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
- PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
- PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* SGPR2 */
/* write the register state for the compute dispatch */
for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
- PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
- PACKET3_SET_SH_REG_START;
ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* write CS partial flush packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* schedule the IB on the ring */
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail;
}
/* wait for the GPU to finish processing the IB */
r = dma_fence_wait(f, false);
if (r) {
DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
goto fail;
}
fail:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
return r;
}
static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
adev->gfx.xcc_mask = 1;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
gfx_v9_0_set_kiq_pm4_funcs(adev);
gfx_v9_0_set_ring_funcs(adev);
gfx_v9_0_set_irq_funcs(adev);
gfx_v9_0_set_gds_init(adev);
gfx_v9_0_set_rlc_funcs(adev);
/* init rlcg reg access ctrl */
gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
return gfx_v9_0_init_microcode(adev);
}
static int gfx_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
/*
* Temporary workaround for an issue where CP firmware fails to
* update the read pointer while CPDMA is writing a clearing
* operation to GDS during the suspend/resume sequence on several
* cards. So just limit this operation to the cold boot sequence.
*/
if ((!adev->in_suspend) &&
(adev->gds.gds_size)) {
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
return r;
}
/* requires IBs so do in late init after IB pool is initialized */
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
else
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
if (r)
return r;
if (adev->gfx.ras &&
adev->gfx.ras->enable_watchdog_timer)
adev->gfx.ras->enable_watchdog_timer(adev);
return 0;
}
static int gfx_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
if (r)
return r;
r = gfx_v9_0_ecc_late_init(handle);
if (r)
return r;
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
gfx_v9_4_2_debug_trap_config_init(adev,
adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
else
gfx_v9_0_debug_trap_config_init(adev,
adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
return 0;
}
static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
uint32_t rlc_setting;
/* if RLC is not enabled, do nothing */
rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
return false;
return true;
}
static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
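/* request safe mode (CMD plus a MESSAGE of 1) and poll until the RLC
* acknowledges it by clearing the CMD field
*/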
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
}
static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
data = RLC_SAFE_MODE__CMD_MASK;
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
}
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
} else {
gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
/* TODO: double check if we need to perform under safe mode */
/* gfx_v9_0_enter_rlc_safe_mode(adev); */
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
else
gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
else
gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
/* gfx_v9_0_exit_rlc_safe_mode(adev); */
}
static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
/* only for Vega10 & Raven1 */
data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* MGLS is a global flag to control all MGLS in GFX */
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
/* 2 - RLC memory Light sleep */
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
/* 3 - CP memory Light sleep */
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
if (def != data)
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
} else {
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* 2 - disable MGLS in RLC */
data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
/* 3 - disable MGLS in CP */
data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def;
if (!adev->gfx.num_gfx_rings)
return;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* Enable 3D CGCG/CGLS */
if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
else
data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
/* set IDLE_POLL_COUNT(0x00900100) */
def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
if (def != data)
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
} else {
/* Disable CGCG/CGLS */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
/* disable cgcg, cgls should be disabled */
data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
/* disable cgcg and cgls in FSM */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
else
data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
/* update CGCG and CGLS override bits */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
/* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
else
data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
/* set IDLE_POLL_COUNT(0x00900100) */
def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
if (def != data)
WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
} else {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
/* reset CGCG/CGLS bits */
data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
/* disable cgcg and cgls in FSM */
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS
* === MGCG + MGLS ===
*/
gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v9_0_update_3d_clock_gating(adev, enable);
/* === CGCG + CGLS === */
gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
} else {
/* CGCG/CGLS should be disabled before MGCG/MGLS
* === CGCG + CGLS ===
*/
gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v9_0_update_3d_clock_gating(adev, enable);
/* === MGCG + MGLS === */
gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
}
return 0;
}
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
unsigned int vmid)
{
u32 reg, data;
reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
if (amdgpu_sriov_is_pp_one_vf(adev))
WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
{
amdgpu_gfx_off_ctrl(adev, false);
gfx_v9_0_update_spm_vmid_internal(adev, vmid);
amdgpu_gfx_off_ctrl(adev, true);
}
static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
uint32_t offset,
struct soc15_reg_rlcg *entries, int arr_size)
{
int i;
uint32_t reg;
if (!entries)
return false;
for (i = 0; i < arr_size; i++) {
const struct soc15_reg_rlcg *entry;
entry = &entries[i];
reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
if (offset == reg)
return true;
}
return false;
}
static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
return gfx_v9_0_check_rlcg_range(adev, offset,
(void *)rlcg_access_gc_9_0,
ARRAY_SIZE(rlcg_access_gc_9_0));
}
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
.set_safe_mode = gfx_v9_0_set_safe_mode,
.unset_safe_mode = gfx_v9_0_unset_safe_mode,
.init = gfx_v9_0_rlc_init,
.get_csb_size = gfx_v9_0_get_csb_size,
.get_csb_buffer = gfx_v9_0_get_csb_buffer,
.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
.resume = gfx_v9_0_rlc_resume,
.stop = gfx_v9_0_rlc_stop,
.reset = gfx_v9_0_rlc_reset,
.start = gfx_v9_0_rlc_start,
.update_spm_vmid = gfx_v9_0_update_spm_vmid,
.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
static int gfx_v9_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 3, 0):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
} else {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
}
if (adev->pg_flags & AMD_PG_SUPPORT_CP)
gfx_v9_0_enable_cp_power_gating(adev, true);
else
gfx_v9_0_enable_cp_power_gating(adev, false);
/* update gfx cgpg state */
gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
/* update mgcg state */
gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
if (enable)
amdgpu_gfx_off_ctrl(adev, true);
break;
case IP_VERSION(9, 2, 1):
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
}
return 0;
}
static int gfx_v9_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 2):
gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
static void gfx_v9_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
/* AMD_CG_SUPPORT_GFX_MGCG */
data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
*flags |= AMD_CG_SUPPORT_GFX_MGCG;
/* AMD_CG_SUPPORT_GFX_CGCG */
data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGCG;
/* AMD_CG_SUPPORT_GFX_CGLS */
if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CGLS;
/* AMD_CG_SUPPORT_GFX_RLC_LS */
data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
/* AMD_CG_SUPPORT_GFX_CP_LS */
data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) {
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
/* AMD_CG_SUPPORT_GFX_3D_CGLS */
if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}
}
static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
}
static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u64 wptr;
/* XXX check if swapping is necessary on BE */
if (ring->use_doorbell) {
wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
} else {
wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
}
return wptr;
}
static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
}
}
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
break;
case 2:
ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
break;
default:
return;
}
reg_mem_engine = 0;
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
reg_mem_engine = 1; /* pfp */
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
adev->nbio.funcs->get_hdp_flush_req_offset(adev),
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v9_0_ring_emit_de_meta(ring,
(!amdgpu_sriov_vf(ring->adev) &&
flags & AMDGPU_IB_PREEMPTED) ?
true : false,
job->gds_size > 0 && job->gds_base != 0);
}
amdgpu_ring_write(ring, header);
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_ib_on_emit_cntl(ring);
amdgpu_ring_write(ring, control);
}
static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
unsigned offset)
{
u32 control = ring->ring[offset];
control |= INDIRECT_BUFFER_PRE_RESUME(1);
ring->ring[offset] = control;
}
static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
unsigned offset)
{
struct amdgpu_device *adev = ring->adev;
void *ce_payload_cpu_addr;
uint64_t payload_offset, payload_size;
payload_size = sizeof(struct v9_ce_ib_state);
if (ring->is_mes_queue) {
payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
gfx[0].gfx_meta_data) +
offsetof(struct v9_gfx_meta_data, ce_payload);
ce_payload_cpu_addr =
amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
} else {
payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
}
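/* copy the saved CE payload back into the ring, splitting the copy
* when it wraps past the end of the ring buffer
*/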
if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
} else {
memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
(ring->buf_mask + 1 - offset) << 2);
payload_size -= (ring->buf_mask + 1 - offset) << 2;
memcpy((void *)&ring->ring[0],
ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
payload_size);
}
}
static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
unsigned offset)
{
struct amdgpu_device *adev = ring->adev;
void *de_payload_cpu_addr;
uint64_t payload_offset, payload_size;
payload_size = sizeof(struct v9_de_ib_state);
if (ring->is_mes_queue) {
payload_offset = offsetof(struct amdgpu_mes_ctx_meta_data,
gfx[0].gfx_meta_data) +
offsetof(struct v9_gfx_meta_data, de_payload);
de_payload_cpu_addr =
amdgpu_mes_ctx_get_offs_cpu_addr(ring, payload_offset);
} else {
payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;
}
((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
IB_COMPLETION_STATUS_PREEMPTED;
if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
} else {
memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
(ring->buf_mask + 1 - offset) << 2);
payload_size -= (ring->buf_mask + 1 - offset) << 2;
memcpy((void *)&ring->ring[0],
de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
payload_size);
}
}
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
/* Currently, there is a high probability of getting a wave ID mismatch
* between ME and GDS, leading to a hw deadlock, because ME generates
* different wave IDs than the GDS expects. This situation happens
* randomly when at least 5 compute pipes use GDS ordered append.
* The wave IDs generated by ME are also wrong after suspend/resume.
* Those are probably bugs somewhere else in the kernel driver.
*
* Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
* GDS to 0 for this ring (me/pipe).
*/
if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
}
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, control);
}
static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
uint32_t dw2 = 0;
/* RELEASE_MEM - flush caches, send int */
amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
if (writeback) {
dw2 = EOP_TC_NC_ACTION_EN;
} else {
dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
EOP_TC_MD_ACTION_EN;
}
dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5);
if (exec)
dw2 |= EOP_EXEC;
amdgpu_ring_write(ring, dw2);
amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
/*
* the address should be Qword aligned for a 64-bit write, and Dword
* aligned if only the low 32 bits of data are sent (data high discarded)
*/
if (write64bit)
BUG_ON(addr & 0x7);
else
BUG_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
amdgpu_ring_write(ring, 0);
}
static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
lower_32_bits(addr), upper_32_bits(addr),
seq, 0xffffffff, 4);
}
static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* compute doesn't have PFP */
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
}
}
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
}
static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
u64 wptr;
/* XXX check if swapping is necessary on BE */
if (ring->use_doorbell)
wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
else
BUG();
return wptr;
}
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
/* XXX check if swapping is necessary on BE */
if (ring->use_doorbell) {
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
BUG(); /* only DOORBELL method supported on gfx9 now */
}
}
static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
/* we only allocate 32bit for each seq wb address */
BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
/* write fence seq to the "addr" */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
if (flags & AMDGPU_FENCE_FLAG_INT) {
/* set register to trigger INT */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
}
}
static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
{
struct amdgpu_device *adev = ring->adev;
struct v9_ce_ib_state ce_payload = {0};
uint64_t offset, ce_payload_gpu_addr;
void *ce_payload_cpu_addr;
int cnt;
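/* WRITE_DATA body: one control dword, the 64-bit destination address and
* the CE payload dwords; the PACKET3 count field is the body size minus one
*/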
cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
if (ring->is_mes_queue) {
offset = offsetof(struct amdgpu_mes_ctx_meta_data,
gfx[0].gfx_meta_data) +
offsetof(struct v9_gfx_meta_data, ce_payload);
ce_payload_gpu_addr =
amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
ce_payload_cpu_addr =
amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
} else {
offset = offsetof(struct v9_gfx_meta_data, ce_payload);
ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
WRITE_DATA_DST_SEL(8) |
WR_CONFIRM) |
WRITE_DATA_CACHE_POLICY(0));
amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
amdgpu_ring_ib_on_emit_ce(ring);
if (resume)
amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
sizeof(ce_payload) >> 2);
else
amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
sizeof(ce_payload) >> 2);
}
static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
/* assert preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, false);
ring->trail_seq += 1;
amdgpu_ring_alloc(ring, 13);
gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);
/* assert IB preemption, emit the trailing fence */
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
ring->trail_fence_gpu_addr,
ring->trail_seq);
amdgpu_ring_commit(kiq_ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
/* poll the trailing fence */
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->trail_seq ==
le32_to_cpu(*ring->trail_fence_cpu_addr))
break;
udelay(1);
}
if (i >= adev->usec_timeout) {
r = -EINVAL;
DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
}
/* reset CP_VMID_PREEMPT after the trailing fence */
amdgpu_ring_emit_wreg(ring,
SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
0x0);
amdgpu_ring_commit(ring);
/* deassert preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, true);
return r;
}
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
{
struct amdgpu_device *adev = ring->adev;
struct v9_de_ib_state de_payload = {0};
uint64_t offset, gds_addr, de_payload_gpu_addr;
void *de_payload_cpu_addr;
int cnt;
if (ring->is_mes_queue) {
offset = offsetof(struct amdgpu_mes_ctx_meta_data,
gfx[0].gfx_meta_data) +
offsetof(struct v9_gfx_meta_data, de_payload);
de_payload_gpu_addr =
amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
de_payload_cpu_addr =
amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
offset = offsetof(struct amdgpu_mes_ctx_meta_data,
gfx[0].gds_backup) +
offsetof(struct v9_gfx_meta_data, de_payload);
gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
} else {
offset = offsetof(struct v9_gfx_meta_data, de_payload);
de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
AMDGPU_CSA_SIZE - adev->gds.gds_size,
PAGE_SIZE);
}
if (usegds) {
de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
}
cnt = (sizeof(de_payload) >> 2) + 4 - 2;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(8) |
WR_CONFIRM) |
WRITE_DATA_CACHE_POLICY(0));
amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
amdgpu_ring_ib_on_emit_de(ring);
if (resume)
amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
sizeof(de_payload) >> 2);
else
amdgpu_ring_write_multiple(ring, (void *)&de_payload,
sizeof(de_payload) >> 2);
}
static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
bool secure)
{
uint32_t v = secure ? FRAME_TMZ : 0;
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
gfx_v9_0_ring_emit_ce_meta(ring,
(!amdgpu_sriov_vf(ring->adev) &&
flags & AMDGPU_IB_PREEMPTED) ? true : false);
dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
dw2 |= 0x01000000;
/* set load_per_context_state & load_gfx_sh_regs for GFX */
dw2 |= 0x10002;
/* set load_ce_ram if preamble presented */
if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
dw2 |= 0x10000000;
} else {
/* still load_ce_ram if this is the first time the preamble is
* presented, even though no context switch happens.
*/
if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
dw2 |= 0x10000000;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
amdgpu_ring_write(ring, dw2);
amdgpu_ring_write(ring, 0);
}
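/* Emit a COND_EXEC packet whose dword count is patched later; the packet
* skips the following commands when *cond_exe_gpu_addr == 0. Returns the
* ring offset of the dummy count dword so it can be patched by
* gfx_v9_0_ring_emit_patch_cond_exec().
*/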
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
ret = ring->wptr & ring->buf_mask;
amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
return ret;
}
static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
unsigned cur;
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
cur = (ring->wptr - 1) & ring->buf_mask;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
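/* Emit a COPY_DATA packet that copies a register value into the write-back
* buffer at reg_val_offs; used by the KIQ to read registers on behalf of
* the driver.
*/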
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register */
(5 << 8) | /* dst: memory */
(1 << 20)); /* write confirm */
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val)
{
uint32_t cmd = 0;
switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_GFX:
cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
break;
case AMDGPU_RING_TYPE_KIQ:
cmd = (1 << 16); /* no inc addr */
break;
default:
cmd = WR_CONFIRM;
break;
}
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, cmd);
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, val);
}
static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
struct amdgpu_device *adev = ring->adev;
bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
if (fw_version_ok)
gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
ref, mask, 0x20);
else
amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
ref, mask);
}
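/* Soft recovery: issue an SQ_CMD targeted at the waves of the given VMID
* (CHECK_VMID is set so only that VMID's waves are affected).
*/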
static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t value = 0;
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
}
}
static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
int me, int pipe,
enum amdgpu_interrupt_state state)
{
u32 mec_int_cntl, mec_int_cntl_reg;
/*
* amdgpu controls only the first MEC. That's why this function only
* handles the setting of interrupts for this specific MEC. All other
* pipes' interrupts are set by amdkfd.
*/
if (me == 1) {
switch (pipe) {
case 0:
mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
break;
case 1:
mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
break;
case 2:
mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
break;
case 3:
mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
break;
default:
DRM_DEBUG("invalid pipe %d\n", pipe);
return;
}
} else {
DRM_DEBUG("invalid me %d\n", me);
return;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 0);
WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
TIME_STAMP_INT_ENABLE, 1);
WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
break;
default:
break;
}
}
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_REG_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
}
return 0;
}
static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
case AMDGPU_IRQ_STATE_ENABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
PRIV_INSTR_INT_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
}
return 0;
}
#define ENABLE_ECC_ON_ME_PIPE(me, pipe) \
WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
CP_ECC_ERROR_INT_ENABLE, 1)
#define DISABLE_ECC_ON_ME_PIPE(me, pipe) \
WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
CP_ECC_ERROR_INT_ENABLE, 0)
static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
CP_ECC_ERROR_INT_ENABLE, 0);
DISABLE_ECC_ON_ME_PIPE(1, 0);
DISABLE_ECC_ON_ME_PIPE(1, 1);
DISABLE_ECC_ON_ME_PIPE(1, 2);
DISABLE_ECC_ON_ME_PIPE(1, 3);
break;
case AMDGPU_IRQ_STATE_ENABLE:
WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
CP_ECC_ERROR_INT_ENABLE, 1);
ENABLE_ECC_ON_ME_PIPE(1, 0);
ENABLE_ECC_ON_ME_PIPE(1, 1);
ENABLE_ECC_ON_ME_PIPE(1, 2);
ENABLE_ECC_ON_ME_PIPE(1, 3);
break;
default:
break;
}
return 0;
}
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
switch (type) {
case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
break;
case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
break;
default:
break;
}
return 0;
}
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
int i;
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
DRM_DEBUG("IH: CP EOP\n");
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
switch (me_id) {
case 0:
if (adev->gfx.num_gfx_rings &&
!amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
/* Fence signals are handled on the software rings */
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
}
break;
case 1:
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
/* Per-queue interrupt is supported for MEC starting from VI.
* The interrupt can only be enabled/disabled per pipe instead of per queue.
*/
if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
amdgpu_fence_process(ring);
}
break;
}
return 0;
}
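/* Decode me/pipe/queue from the IV ring_id and report a scheduler fault on
* the matching ring so the offending job can be handled by the scheduler.
*/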
static void gfx_v9_0_fault(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
u8 me_id, pipe_id, queue_id;
struct amdgpu_ring *ring;
int i;
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
queue_id = (entry->ring_id & 0x70) >> 4;
switch (me_id) {
case 0:
drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
break;
case 1:
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
if (ring->me == me_id && ring->pipe == pipe_id &&
ring->queue == queue_id)
drm_sched_fault(&ring->sched);
}
break;
}
}
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
gfx_v9_0_fault(adev, entry);
return 0;
}
static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
gfx_v9_0_fault(adev, entry);
return 0;
}
static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
},
{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
},
{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
0, 0
},
{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
0, 0
},
{ "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
},
{ "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
0, 0
},
{ "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
},
{ "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
},
{ "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
0, 0
},
{ "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
0, 0
},
{ "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
0, 0
},
{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
},
{ "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
0, 0
},
{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
},
{ "GDS_OA_PHY_PHY_CMD_RAM_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
},
{ "GDS_OA_PHY_PHY_DATA_RAM_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
0, 0
},
{ "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
},
{ "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
},
{ "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
},
{ "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
},
{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
0, 0
},
{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
},
{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
0, 0
},
{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
0, 0
},
{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
0, 0
},
{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
0, 0
},
{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
0, 0
},
{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
0, 0
},
{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
},
{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
},
{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
},
{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
},
{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
},
{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
0, 0
},
{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
0, 0
},
{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
0, 0
},
{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
0, 0
},
{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
0, 0
},
{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
0, 0
},
{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
0, 0
},
{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
0, 0
},
{ "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
0, 0
},
{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
0, 0
},
{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
0, 0
},
{ "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
0, 0
},
{ "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
0, 0
},
{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
0, 0
},
{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
},
{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
},
{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
0, 0
},
{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
0, 0
},
{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
0, 0
},
{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
},
{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
},
{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
},
{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
},
{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
0, 0
},
{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
},
{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
},
{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
},
{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
},
{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
},
{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
},
{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
},
{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
},
{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
},
{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
},
{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
},
{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
},
{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
},
{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
},
{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
},
{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
},
{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
},
{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
0, 0
},
{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
0, 0
},
{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
0, 0
},
{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
0, 0
},
{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
0, 0
},
{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
},
{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
},
{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
},
{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
},
{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
},
{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
0, 0
},
{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
0, 0
},
{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
0, 0
},
{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
0, 0
},
{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
0, 0
},
{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
},
{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
},
{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
},
{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
},
{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
},
{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
0, 0
},
{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
0, 0
},
{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
0, 0
},
{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
0, 0
},
{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
0, 0
},
{ "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
},
{ "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
},
{ "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
},
{ "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
0, 0
},
{ "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
0, 0
},
{ "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
0, 0
},
{ "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
0, 0
},
{ "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
0, 0
},
{ "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
0, 0
}
};
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if, uint32_t instance_mask)
{
struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
int ret;
struct ta_ras_trigger_error_input block_info = { 0 };
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return -EINVAL;
if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
return -EINVAL;
if (!ras_gfx_subblocks[info->head.sub_block_index].name)
return -EPERM;
if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
info->head.type)) {
DRM_ERROR("GFX Subblock %s, hardware do not support type 0x%x\n",
ras_gfx_subblocks[info->head.sub_block_index].name,
info->head.type);
return -EPERM;
}
if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
info->head.type)) {
DRM_ERROR("GFX Subblock %s, driver do not support type 0x%x\n",
ras_gfx_subblocks[info->head.sub_block_index].name,
info->head.type);
return -EPERM;
}
block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
block_info.sub_block_index =
ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
block_info.address = info->address;
block_info.value = info->value;
mutex_lock(&adev->grbm_idx_mutex);
ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask);
mutex_unlock(&adev->grbm_idx_mutex);
return ret;
}
static const char *vml2_mems[] = {
"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_0_4K_MEM0",
"UTC_VML2_BANK_CACHE_0_4K_MEM1",
"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_1_4K_MEM0",
"UTC_VML2_BANK_CACHE_1_4K_MEM1",
"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_2_4K_MEM0",
"UTC_VML2_BANK_CACHE_2_4K_MEM1",
"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_3_4K_MEM0",
"UTC_VML2_BANK_CACHE_3_4K_MEM1",
};
static const char *vml2_walker_mems[] = {
"UTC_VML2_CACHE_PDE0_MEM0",
"UTC_VML2_CACHE_PDE0_MEM1",
"UTC_VML2_CACHE_PDE1_MEM0",
"UTC_VML2_CACHE_PDE1_MEM1",
"UTC_VML2_CACHE_PDE2_MEM0",
"UTC_VML2_CACHE_PDE2_MEM1",
"UTC_VML2_RDIF_LOG_FIFO",
};
static const char *atc_l2_cache_2m_mems[] = {
"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
};
static const char *atc_l2_cache_4k_mems[] = {
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
};
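/* Walk the UTC memory instances (VML2, VML2 walker, ATC L2 2M/4K caches),
* read their EDC counters and accumulate SEC counts as correctable and DED
* counts as uncorrectable errors in err_data.
*/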
static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
struct ras_err_data *err_data)
{
uint32_t i, data;
uint32_t sec_count, ded_count;
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
if (sec_count) {
dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
"SEC %d\n", i, vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
if (ded_count) {
dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
"DED %d\n", i, vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
SEC_COUNT);
if (sec_count) {
dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
"SEC %d\n", i, vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
DED_COUNT);
if (ded_count) {
dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
"DED %d\n", i, vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
"SEC %d\n", i, atc_l2_cache_2m_mems[i],
sec_count);
err_data->ce_count += sec_count;
}
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
"SEC %d\n", i, atc_l2_cache_4k_mems[i],
sec_count);
err_data->ce_count += sec_count;
}
ded_count = (data & 0x00018000L) >> 0xf;
if (ded_count) {
dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
"DED %d\n", i, atc_l2_cache_4k_mems[i],
ded_count);
err_data->ue_count += ded_count;
}
}
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
return 0;
}
static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id, uint32_t value,
uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
gfx_v9_0_ras_fields[i].seg != reg->seg ||
gfx_v9_0_ras_fields[i].inst != reg->inst)
continue;
sec_cnt = (value &
gfx_v9_0_ras_fields[i].sec_count_mask) >>
gfx_v9_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
dev_info(adev->dev, "GFX SubBlock %s, "
"Instance[%d][%d], SEC %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = (value &
gfx_v9_0_ras_fields[i].ded_count_mask) >>
gfx_v9_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
dev_info(adev->dev, "GFX SubBlock %s, "
"Instance[%d][%d], DED %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
}
}
return 0;
}
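/* Clear the GFX EDC counters by reading them back for every SE/instance and
* for each UTC memory instance (the counters are cleared on read).
*/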
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
/* read back registers to clear the counters */
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
}
}
}
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
mutex_unlock(&adev->grbm_idx_mutex);
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
}
for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
}
WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
}
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
uint32_t i, j, k;
uint32_t reg_value;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
err_data->ue_count = 0;
err_data->ce_count = 0;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
gfx_v9_0_ras_error_count(adev,
&gfx_v9_0_edc_counter_regs[i],
j, k, reg_value,
&sec_count, &ded_count);
}
}
}
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_0_query_utc_edc_status(adev, err_data);
}
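/* Emit an ACQUIRE_MEM packet that flushes/invalidates the shader I$/K$,
* TCL1 and TC caches over the full address range.
*/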
static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
{
const unsigned int cp_coher_cntl =
PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
}
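/* Program SPI_WCL_PIPE_PERCENT_CS<pipe> to throttle compute waves on the
* given pipe, or restore the default limit when disabling.
*/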
static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
uint32_t pipe, bool enable)
{
struct amdgpu_device *adev = ring->adev;
uint32_t val;
uint32_t wcl_cs_reg;
/* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;
switch (pipe) {
case 0:
wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
break;
case 1:
wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
break;
case 2:
wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
break;
case 3:
wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
break;
default:
DRM_DEBUG("invalid pipe %d\n", pipe);
return;
}
amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
}
static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
{
struct amdgpu_device *adev = ring->adev;
uint32_t val;
int i;
/* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to limit
* the number of gfx waves. Setting the low 5 bits (0x1f) makes sure gfx
* only gets around 25% of the gpu resources.
*/
val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
amdgpu_ring_emit_wreg(ring,
SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
val);
/* Restrict waves for normal/low priority compute queues as well
* to get best QoS for high priority compute jobs.
*
* amdgpu controls only the 1st ME (CS pipes 0-3).
*/
for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
if (i != ring->pipe)
gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
}
}
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
.late_init = gfx_v9_0_late_init,
.sw_init = gfx_v9_0_sw_init,
.sw_fini = gfx_v9_0_sw_fini,
.hw_init = gfx_v9_0_hw_init,
.hw_fini = gfx_v9_0_hw_fini,
.suspend = gfx_v9_0_suspend,
.resume = gfx_v9_0_resume,
.is_idle = gfx_v9_0_is_idle,
.wait_for_idle = gfx_v9_0_wait_for_idle,
.soft_reset = gfx_v9_0_soft_reset,
.set_clockgating_state = gfx_v9_0_set_clockgating_state,
.set_powergating_state = gfx_v9_0_set_powergating_state,
.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.type = AMDGPU_RING_TYPE_GFX,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
.emit_frame_size = /* totally 242 maximum if 16 IBs */
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
the first COND_EXEC jumps to the place just
prior to this double SWITCH_BUFFER */
5 + /* COND_EXEC */
7 + /* HDP_flush */
4 + /* VGT_flush */
14 + /* CE_META */
31 + /* DE_META */
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v9_0_ring_preempt_ib,
.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
.type = AMDGPU_RING_TYPE_GFX,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.secure_submission_supported = true,
.get_rptr = amdgpu_sw_ring_get_rptr_gfx,
.get_wptr = amdgpu_sw_ring_get_wptr_gfx,
.set_wptr = amdgpu_sw_ring_set_wptr_gfx,
.emit_frame_size = /* totally 242 maximum if 16 IBs */
5 + /* COND_EXEC */
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
* the first COND_EXEC jumps to the place just
* prior to this double SWITCH_BUFFER
*/
5 + /* COND_EXEC */
7 + /* HDP_flush */
4 + /* VGT_flush */
14 + /* CE_META */
31 + /* DE_META */
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
2 + /* SWITCH_BUFFER */
7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_sw_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
.patch_cntl = gfx_v9_0_ring_patch_cntl,
.patch_de = gfx_v9_0_ring_patch_de_meta,
.patch_ce = gfx_v9_0_ring_patch_ce_meta,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.type = AMDGPU_RING_TYPE_COMPUTE,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
.emit_frame_size =
20 + /* gfx_v9_0_ring_emit_gds_switch */
7 + /* gfx_v9_0_ring_emit_hdp_flush */
5 + /* hdp invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.emit_mem_sync = gfx_v9_0_emit_mem_sync,
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.type = AMDGPU_RING_TYPE_KIQ,
.align_mask = 0xff,
.nop = PACKET3(PACKET3_NOP, 0x3FFF),
.support_64bit_ptrs = true,
.get_rptr = gfx_v9_0_ring_get_rptr_compute,
.get_wptr = gfx_v9_0_ring_get_wptr_compute,
.set_wptr = gfx_v9_0_ring_set_wptr_compute,
.emit_frame_size =
20 + /* gfx_v9_0_ring_emit_gds_switch */
7 + /* gfx_v9_0_ring_emit_hdp_flush */
5 + /* hdp invalidate */
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
.test_ring = gfx_v9_0_ring_test_ring,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v9_0_ring_emit_rreg,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
if (adev->gfx.num_gfx_rings) {
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
}
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
.set = gfx_v9_0_set_eop_interrupt_state,
.process = gfx_v9_0_eop_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
.set = gfx_v9_0_set_priv_reg_fault_state,
.process = gfx_v9_0_priv_reg_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
.set = gfx_v9_0_set_priv_inst_fault_state,
.process = gfx_v9_0_priv_inst_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
.set = gfx_v9_0_set_cp_ecc_error_state,
.process = amdgpu_gfx_cp_ecc_error_irq,
};
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 2):
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
default:
break;
}
}
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asic gds info */
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
adev->gds.gds_size = 0x10000;
break;
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 4, 1):
adev->gds.gds_size = 0x1000;
break;
case IP_VERSION(9, 4, 2):
/* aldebaran removed all the GDS internal memory;
* the kernel only supports GWS opcodes such as
* barrier and semaphore. */
adev->gds.gds_size = 0;
break;
default:
adev->gds.gds_size = 0x10000;
break;
}
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 4, 0):
adev->gds.gds_compute_max_wave_id = 0x7ff;
break;
case IP_VERSION(9, 2, 1):
adev->gds.gds_compute_max_wave_id = 0x27f;
break;
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 1, 0):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
break;
case IP_VERSION(9, 4, 1):
adev->gds.gds_compute_max_wave_id = 0xfff;
break;
case IP_VERSION(9, 4, 2):
/* deprecated for Aldebaran, no usage at all */
adev->gds.gds_compute_max_wave_id = 0;
break;
default:
/* this really depends on the chip */
adev->gds.gds_compute_max_wave_id = 0x7ff;
break;
}
adev->gds.gws_size = 64;
adev->gds.oa_size = 16;
}
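/* Write the user-requested inactive CU bitmap for the currently selected
* SE/SH into GC_USER_SHADER_ARRAY_CONFIG.
*/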
static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
u32 bitmap)
{
u32 data;
if (!bitmap)
return;
data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
u32 data, mask;
data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
return (~data) & mask;
}
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
unsigned disable_masks[4 * 4];
if (!adev || !cu_info)
return -EINVAL;
/*
* 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
*/
if (adev->gfx.config.max_shader_engines *
adev->gfx.config.max_sh_per_se > 16)
return -EINVAL;
amdgpu_gfx_parse_disable_cu(disable_masks,
adev->gfx.config.max_shader_engines,
adev->gfx.config.max_sh_per_se);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
gfx_v9_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
/*
* The bitmap(and ao_cu_bitmap) in cu_info structure is
* 4x4 size array, and it's usually suitable for Vega
* ASICs which has 4*2 SE/SH layout.
* But for Arcturus, SE/SH layout is changed to 8*1.
* To mostly reduce the impact, we make it compatible
* with current bitmap array as below:
* SE4,SH0 --> bitmap[0][1]
* SE5,SH0 --> bitmap[1][1]
* SE6,SH0 --> bitmap[2][1]
* SE7,SH0 --> bitmap[3][1]
*/
cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
if (counter < adev->gfx.config.max_cu_per_sh)
ao_bitmap |= mask;
counter++;
}
mask <<= 1;
}
active_cu_number += counter;
if (i < 2 && j < 2)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
}
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
cu_info->simd_per_cu = NUM_SIMD_PER_CU;
return 0;
}
const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,
.major = 9,
.minor = 0,
.rev = 0,
.funcs = &gfx_v9_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c |
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <linux/export.h>
#include <linux/pci.h>
#include <drm/drm_edid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atom.h"
#include "atombios_dp.h"
#include "atombios_i2c.h"
/* bit banging i2c */
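/* Prepare the GPIO pads for a bit-banged transfer: switch the pads to ddc
* mode, clear the output latches, set both pins as inputs and mask them for
* software use. The bus mutex stays held until amdgpu_i2c_post_xfer().
*/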
static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
mutex_lock(&i2c->mutex);
/* switch the pads to ddc mode */
if (rec->hw_capable) {
temp = RREG32(rec->mask_clk_reg);
temp &= ~(1 << 16);
WREG32(rec->mask_clk_reg, temp);
}
/* clear the output pin values */
temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
WREG32(rec->a_clk_reg, temp);
temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
WREG32(rec->a_data_reg, temp);
/* set the pins to input */
temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
WREG32(rec->en_clk_reg, temp);
temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
WREG32(rec->en_data_reg, temp);
/* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
return 0;
}
static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
/* unmask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
mutex_unlock(&i2c->mutex);
}
static int amdgpu_i2c_get_clock(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
/* read the value off the pin */
val = RREG32(rec->y_clk_reg);
val &= rec->y_clk_mask;
return (val != 0);
}
static int amdgpu_i2c_get_data(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
/* read the value off the pin */
val = RREG32(rec->y_data_reg);
val &= rec->y_data_mask;
return (val != 0);
}
static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
/* set pin direction */
val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
val |= clock ? 0 : rec->en_clk_mask;
WREG32(rec->en_clk_reg, val);
}
static void amdgpu_i2c_set_data(void *i2c_priv, int data)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
/* set pin direction */
val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
val |= data ? 0 : rec->en_data_mask;
WREG32(rec->en_data_reg, val);
}
static const struct i2c_algorithm amdgpu_atombios_i2c_algo = {
.master_xfer = amdgpu_atombios_i2c_xfer,
.functionality = amdgpu_atombios_i2c_func,
};
struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name)
{
struct amdgpu_i2c_chan *i2c;
int ret;
/* don't add the mm_i2c bus unless hw_i2c is enabled */
if (rec->mm_i2c && (amdgpu_hw_i2c == 0))
return NULL;
i2c = kzalloc(sizeof(struct amdgpu_i2c_chan), GFP_KERNEL);
if (i2c == NULL)
return NULL;
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
if (rec->hw_capable &&
amdgpu_hw_i2c) {
/* hw i2c using atom */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
if (ret)
goto out_free;
} else {
/* set the amdgpu bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c bit bus %s", name);
i2c->adapter.algo_data = &i2c->bit;
i2c->bit.pre_xfer = amdgpu_i2c_pre_xfer;
i2c->bit.post_xfer = amdgpu_i2c_post_xfer;
i2c->bit.setsda = amdgpu_i2c_set_data;
i2c->bit.setscl = amdgpu_i2c_set_clock;
i2c->bit.getsda = amdgpu_i2c_get_data;
i2c->bit.getscl = amdgpu_i2c_get_clock;
i2c->bit.udelay = 10;
i2c->bit.timeout = usecs_to_jiffies(2200); /* from VESA */
i2c->bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_ERROR("Failed to register bit i2c %s\n", name);
goto out_free;
}
}
return i2c;
out_free:
kfree(i2c);
return NULL;
}
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
{
if (!i2c)
return;
WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
/* Add the default buses */
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
if (amdgpu_hw_i2c)
DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
amdgpu_atombios_i2c_init(adev);
}
/* remove all the buses */
void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
if (adev->i2c_bus[i]) {
amdgpu_i2c_destroy(adev->i2c_bus[i]);
adev->i2c_bus[i] = NULL;
}
}
}
/* Add additional buses */
void amdgpu_i2c_add(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *rec,
const char *name)
{
struct drm_device *dev = adev_to_drm(adev);
int i;
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
if (!adev->i2c_bus[i]) {
adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
return;
}
}
}
/* looks up bus based on id */
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *i2c_bus)
{
int i;
for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
if (adev->i2c_bus[i] &&
(adev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
return adev->i2c_bus[i];
}
}
return NULL;
}
static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 *val)
{
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
*val = in_buf[0];
DRM_DEBUG("val = 0x%02x\n", *val);
} else {
DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
addr, *val);
}
}
static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 val)
{
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = val;
if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
addr, val);
}
/* ddc router switching */
void
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val = 0;
if (!amdgpu_connector->router.ddc_valid)
return;
if (!amdgpu_connector->router_bus)
return;
amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, &val);
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x1, &val);
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
val |= amdgpu_connector->router.ddc_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x1, val);
}
/* clock/data router switching */
void
amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val;
if (!amdgpu_connector->router.cd_valid)
return;
if (!amdgpu_connector->router_bus)
return;
amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, &val);
val &= ~amdgpu_connector->router.cd_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x1, &val);
val &= ~amdgpu_connector->router.cd_mux_control_pin;
val |= amdgpu_connector->router.cd_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x1, val);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "athub_v1_0.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "vega10_enum.h"
#include "soc15_common.h"
static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}
static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
(adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}
int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 0):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(1, 5, 0):
athub_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
/* AMD_CG_SUPPORT_ATHUB_LS */
if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_LS;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/athub_v1_0.c |
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
* Christian König
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
/*
* Rings
* Most engines on the GPU are fed via ring buffers. Ring
* buffers are areas of GPU accessible memory that the host
* writes commands into and the GPU reads commands out of.
* There is a rptr (read pointer) that determines where the
* GPU is currently reading, and a wptr (write pointer)
* which determines where the host has written. When the
* pointers are equal, the ring is idle. When the host
* writes commands to the ring buffer, it increments the
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
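/*
 * Illustrative sketch only (not called anywhere): the usual submission
 * pattern built from the helpers below.  The dword count and the NOP
 * payload are placeholders, not values any caller is required to use.
 *
 *	if (amdgpu_ring_alloc(ring, 16))
 *		return -ENOMEM;				// request too large
 *	amdgpu_ring_write(ring, ring->funcs->nop);	// queue command dwords
 *	amdgpu_ring_commit(ring);			// pad, then bump wptr
 *	// on an error before commit, amdgpu_ring_undo(ring) restores the wptr
 */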
/**
* amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
*
* @type: ring type for which to return the limit.
*/
unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
{
switch (type) {
case AMDGPU_RING_TYPE_GFX:
/* Need to keep at least 192 on GFX7+ for old radv. */
return 192;
case AMDGPU_RING_TYPE_COMPUTE:
return 125;
case AMDGPU_RING_TYPE_VCN_JPEG:
return 16;
default:
return 49;
}
}
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
*
* @ring: amdgpu_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Allocate @ndw dwords in the ring buffer (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
/* Align requested size with padding so amdgpu_ring_commit can
* pad safely */
ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
/* Make sure we aren't trying to allocate more space
* than the maximum for one submission
*/
if (WARN_ON_ONCE(ndw > ring->max_dw))
return -ENOMEM;
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
if (ring->funcs->begin_use)
ring->funcs->begin_use(ring);
return 0;
}
/**
* amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
* @count: the number of NOP packets to insert
*
* This is the generic insert_nop function for rings except SDMA
*/
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
for (i = 0; i < count; i++)
amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
* amdgpu_ring_generic_pad_ib - pad IB with NOP packets
*
* @ring: amdgpu_ring structure holding ring information
* @ib: IB to add NOP packets to
*
* This is the generic pad_ib function for rings except SDMA
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
while (ib->length_dw & ring->funcs->align_mask)
ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
* amdgpu_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
*
* @ring: amdgpu_ring structure holding ring information
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
uint32_t count;
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
count %= ring->funcs->align_mask + 1;
ring->funcs->insert_nop(ring, count);
mb();
amdgpu_ring_set_wptr(ring);
if (ring->funcs->end_use)
ring->funcs->end_use(ring);
}
/**
* amdgpu_ring_undo - reset the wptr
*
* @ring: amdgpu_ring structure holding ring information
*
* Reset the driver's copy of the wptr (all asics).
*/
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
ring->wptr = ring->wptr_old;
if (ring->funcs->end_use)
ring->funcs->end_use(ring);
}
#define amdgpu_ring_get_gpu_addr(ring, offset) \
(ring->is_mes_queue ? \
(ring->mes_ctx->meta_data_gpu_addr + offset) : \
(ring->adev->wb.gpu_addr + offset * 4))
#define amdgpu_ring_get_cpu_addr(ring, offset) \
(ring->is_mes_queue ? \
(void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
(&ring->adev->wb.wb[offset]))
/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
* @max_dw: maximum number of dw for ring alloc
* @irq_src: interrupt source to use for this ring
* @irq_type: interrupt type to use for this ring
* @hw_prio: ring priority (NORMAL/HIGH)
* @sched_score: optional score atomic shared with other schedulers
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int max_dw, struct amdgpu_irq_src *irq_src,
unsigned int irq_type, unsigned int hw_prio,
atomic_t *sched_score)
{
int r;
int sched_hw_submission = amdgpu_sched_hw_submission;
u32 *num_sched;
u32 hw_ip;
unsigned int max_ibs_dw;
/* Set the hw submission limit higher for KIQ because
* it's used for a number of gfx/compute tasks by both
* KFD and KGD which may have outstanding fences and
* it doesn't really use the gpu scheduler anyway;
* KIQ tasks get submitted directly to the ring.
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
sched_hw_submission = max(sched_hw_submission, 256);
else if (ring == &adev->sdma.instance[0].page)
sched_hw_submission = 256;
if (ring->adev == NULL) {
if (adev->num_rings >= AMDGPU_MAX_RINGS)
return -EINVAL;
ring->adev = adev;
ring->num_hw_submission = sched_hw_submission;
ring->sched_score = sched_score;
ring->vmid_wait = dma_fence_get_stub();
if (!ring->is_mes_queue) {
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
}
r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
if (ring->is_mes_queue) {
ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_RPTR_OFFS);
ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_WPTR_OFFS);
ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_FENCE_OFFS);
ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_COND_EXE_OFFS);
} else {
r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
return r;
}
r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
return r;
}
r = amdgpu_device_wb_get(adev, &ring->fence_offs);
if (r) {
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
return r;
}
r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
if (r) {
dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
return r;
}
r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
}
}
ring->fence_gpu_addr =
amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
ring->fence_cpu_addr =
amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);
ring->rptr_gpu_addr =
amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
ring->rptr_cpu_addr =
amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);
ring->wptr_gpu_addr =
amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
ring->wptr_cpu_addr =
amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);
ring->trail_fence_gpu_addr =
amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
ring->trail_fence_cpu_addr =
amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);
ring->cond_exe_gpu_addr =
amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
ring->cond_exe_cpu_addr =
amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
dev_err(adev->dev, "failed initializing fences (%d).\n", r);
return r;
}
max_ibs_dw = ring->funcs->emit_frame_size +
amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
if (WARN_ON(max_ibs_dw > max_dw))
max_dw = max_ibs_dw;
ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
0xffffffffffffffff : ring->buf_mask;
/* Allocate ring buffer */
if (ring->is_mes_queue) {
int offset = 0;
BUG_ON(ring->ring_size > PAGE_SIZE*4);
offset = amdgpu_mes_ctx_get_offs(ring,
AMDGPU_MES_CTX_RING_OFFS);
ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
amdgpu_ring_clear_ring(ring);
} else if (ring->ring_obj == NULL) {
r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
return r;
}
amdgpu_ring_clear_ring(ring);
}
ring->max_dw = max_dw;
ring->hw_prio = hw_prio;
if (!ring->no_scheduler) {
hw_ip = ring->funcs->type;
num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
&ring->sched;
}
return 0;
}
/**
* amdgpu_ring_fini - tear down the driver ring struct.
*
* @ring: amdgpu_ring structure holding ring information
*
* Tear down the driver information for the selected ring (all asics).
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
/* Don't tear down a ring that is not initialized */
if (!(ring->adev) ||
(!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
return;
ring->sched.ready = false;
if (!ring->is_mes_queue) {
amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_device_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
} else {
kfree(ring->fence_drv.fences);
}
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
if (!ring->is_mes_queue)
ring->adev->rings[ring->idx] = NULL;
}
/**
* amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
*
* @ring: ring to write to
* @reg0: register to write
* @reg1: register to wait on
* @ref: reference value to write/wait on
* @mask: mask to wait on
*
* Helper for rings that don't support write and wait in a
* single oneshot packet.
*/
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
amdgpu_ring_emit_wreg(ring, reg0, ref);
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
/**
* amdgpu_ring_soft_recovery - try to soft recover a ring lockup
*
* @ring: ring to try the recovery on
* @vmid: VMID we try to get going again
* @fence: timedout fence
*
* Tries to get a ring proceeding again when it is stuck.
*/
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
struct dma_fence *fence)
{
unsigned long flags;
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
return false;
spin_lock_irqsave(fence->lock, flags);
if (!dma_fence_is_signaled_locked(fence))
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
return dma_fence_is_signaled(fence);
}
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
/* Layout of file is 12 bytes consisting of
* - rptr
* - wptr
* - driver's copy of wptr
*
* followed by n-words of ring data
*/
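/*
 * Example read from user space (illustrative; the debugfs path and the
 * dri node index are assumptions and depend on the system):
 *	dd if=/sys/kernel/debug/dri/0/amdgpu_ring_gfx bs=4 count=3 | xxd
 * dumps rptr, wptr and the driver's wptr copy; the ring contents start
 * at byte offset 12.  Offsets and sizes must be 4-byte aligned.
 */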
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
int r, i;
uint32_t value, result, early[3];
if (*pos & 3 || size & 3)
return -EINVAL;
result = 0;
if (*pos < 12) {
early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
early[2] = ring->wptr & ring->buf_mask;
for (i = *pos / 4; i < 3 && size; i++) {
r = put_user(early[i], (uint32_t *)buf);
if (r)
return r;
buf += 4;
result += 4;
size -= 4;
*pos += 4;
}
}
while (size) {
if (*pos >= (ring->ring_size + 12))
return result;
value = ring->ring[(*pos - 12)/4];
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
buf += 4;
result += 4;
size -= 4;
*pos += 4;
}
return result;
}
static const struct file_operations amdgpu_debugfs_ring_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_ring_read,
.llseek = default_llseek
};
static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
volatile u32 *mqd;
int r;
uint32_t value, result;
if (*pos & 3 || size & 3)
return -EINVAL;
result = 0;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
if (r) {
amdgpu_bo_unreserve(ring->mqd_obj);
return r;
}
while (size) {
if (*pos >= ring->mqd_size)
goto done;
value = mqd[*pos/4];
r = put_user(value, (uint32_t *)buf);
if (r)
goto done;
buf += 4;
result += 4;
size -= 4;
*pos += 4;
}
done:
amdgpu_bo_kunmap(ring->mqd_obj);
mqd = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
if (r)
return r;
return result;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_mqd_read,
.llseek = default_llseek
};
static int amdgpu_debugfs_ring_error(void *data, u64 val)
{
struct amdgpu_ring *ring = data;
amdgpu_fence_driver_set_error(ring, val);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
amdgpu_debugfs_ring_error, "%lld\n");
#endif
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
&amdgpu_debugfs_ring_fops,
ring->ring_size + 12);
if (ring->mqd_obj) {
sprintf(name, "amdgpu_mqd_%s", ring->name);
debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
&amdgpu_debugfs_mqd_fops,
ring->mqd_size);
}
sprintf(name, "amdgpu_error_%s", ring->name);
debugfs_create_file(name, 0200, root, ring,
&amdgpu_debugfs_error_fops);
#endif
}
/**
* amdgpu_ring_test_helper - test the ring and set the sched readiness status
*
* @ring: ring to try the recovery on
*
* Tests the ring and sets the sched readiness status.
*
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int r;
r = amdgpu_ring_test_ring(ring);
if (r)
DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
ring->name, r);
else
DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
ring->name);
ring->sched.ready = !r;
return r;
}
static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
struct amdgpu_mqd_prop *prop)
{
struct amdgpu_device *adev = ring->adev;
memset(prop, 0, sizeof(*prop));
prop->mqd_gpu_addr = ring->mqd_gpu_addr;
prop->hqd_base_gpu_addr = ring->gpu_addr;
prop->rptr_gpu_addr = ring->rptr_gpu_addr;
prop->wptr_gpu_addr = ring->wptr_gpu_addr;
prop->queue_size = ring->ring_size;
prop->eop_gpu_addr = ring->eop_gpu_addr;
prop->use_doorbell = ring->use_doorbell;
prop->doorbell_index = ring->doorbell_index;
/* The map_queues packet doesn't need to activate the queue,
* so only the kiq needs to set this field.
*/
prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;
if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) ||
(ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
amdgpu_gfx_is_high_priority_graphics_queue(adev, ring))) {
prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
}
}
int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_mqd *mqd_mgr;
struct amdgpu_mqd_prop prop;
amdgpu_ring_to_mqd_prop(ring, &prop);
ring->wptr = 0;
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
else
mqd_mgr = &adev->mqds[ring->funcs->type];
return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_begin(ring);
}
void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_end(ring);
}
void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
{
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
}
void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
{
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
}
void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
{
if (ring->is_sw_ring)
amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*
*/
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "amdgpu_rap.h"
/**
* DOC: AMDGPU RAP debugfs test interface
*
* How to use:
* echo opcode > <debugfs_dir>/dri/xxx/rap_test
*
* opcode:
* Currently, only opcode 2 is supported by the Linux host driver.
* Opcode 2 stands for TA_CMD_RAP__VALIDATE_L0 and is used to trigger
* L0 policy validation; refer to the header file ta_rap_if.h for
* more detail.
*
*/
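/*
 * Example (illustrative; the dri node index depends on the system):
 *	echo 2 > /sys/kernel/debug/dri/0/rap_test
 * The validation result is reported through the kernel log.
 */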
static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct ta_rap_shared_memory *rap_shared_mem;
struct ta_rap_cmd_output_data *rap_cmd_output;
struct drm_device *dev = adev_to_drm(adev);
uint32_t op;
enum ta_rap_status status;
int ret;
if (*pos || size != 2)
return -EINVAL;
ret = kstrtouint_from_user(buf, size, *pos, &op);
if (ret)
return ret;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
/* make sure the gfx core is on; the RAP TA can't handle
* the GFX OFF case currently.
*/
amdgpu_gfx_off_ctrl(adev, false);
switch (op) {
case 2:
ret = psp_rap_invoke(&adev->psp, op, &status);
if (!ret && status == TA_RAP_STATUS__SUCCESS) {
dev_info(adev->dev, "RAP L0 validate test success.\n");
} else {
rap_shared_mem = (struct ta_rap_shared_memory *)
adev->psp.rap_context.context.mem_context.shared_buf;
rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
dev_info(adev->dev, "RAP test failed, the output is:\n");
dev_info(adev->dev, "\tlast_subsection: 0x%08x.\n",
rap_cmd_output->last_subsection);
dev_info(adev->dev, "\tnum_total_validate: 0x%08x.\n",
rap_cmd_output->num_total_validate);
dev_info(adev->dev, "\tnum_valid: 0x%08x.\n",
rap_cmd_output->num_valid);
dev_info(adev->dev, "\tlast_validate_addr: 0x%08x.\n",
rap_cmd_output->last_validate_addr);
dev_info(adev->dev, "\tlast_validate_val: 0x%08x.\n",
rap_cmd_output->last_validate_val);
dev_info(adev->dev, "\tlast_validate_val_exptd: 0x%08x.\n",
rap_cmd_output->last_validate_val_exptd);
}
break;
default:
dev_info(adev->dev, "Unsupported op id: %d, ", op);
dev_info(adev->dev, "Only support op 2(L0 validate test).\n");
break;
}
amdgpu_gfx_off_ctrl(adev, true);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
}
static const struct file_operations amdgpu_rap_debugfs_ops = {
.owner = THIS_MODULE,
.read = NULL,
.write = amdgpu_rap_debugfs_write,
.llseek = default_llseek
};
void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev_to_drm(adev)->primary;
if (!adev->psp.rap_context.context.initialized)
return;
debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
adev, &amdgpu_rap_debugfs_ops);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"
#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
int (*run_func)(void *handle, uint32_t inst_mask);
int ret = 0;
if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
return 0;
run_func = NULL;
switch (xcp_state) {
case AMDGPU_XCP_PREPARE_SUSPEND:
run_func = xcp_ip->ip_funcs->prepare_suspend;
break;
case AMDGPU_XCP_SUSPEND:
run_func = xcp_ip->ip_funcs->suspend;
break;
case AMDGPU_XCP_PREPARE_RESUME:
run_func = xcp_ip->ip_funcs->prepare_resume;
break;
case AMDGPU_XCP_RESUME:
run_func = xcp_ip->ip_funcs->resume;
break;
}
if (run_func)
ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);
return ret;
}
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
int state)
{
struct amdgpu_xcp_ip *xcp_ip;
struct amdgpu_xcp *xcp;
int i, ret;
if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
return -EINVAL;
xcp = &xcp_mgr->xcp[xcp_id];
for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
xcp_ip = &xcp->ip[i];
ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
if (ret)
break;
}
return ret;
}
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
AMDGPU_XCP_PREPARE_SUSPEND);
}
int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}
int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
AMDGPU_XCP_PREPARE_RESUME);
}
int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}
static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
struct amdgpu_xcp_ip *ip)
{
struct amdgpu_xcp *xcp;
if (!ip)
return;
xcp = &xcp_mgr->xcp[xcp_id];
xcp->ip[ip->ip_id] = *ip;
xcp->ip[ip->ip_id].valid = true;
xcp->valid = true;
}
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
struct amdgpu_xcp_ip ip;
uint8_t mem_id;
int i, j, ret;
if (!num_xcps || num_xcps > MAX_XCP)
return -EINVAL;
xcp_mgr->mode = mode;
for (i = 0; i < MAX_XCP; ++i)
xcp_mgr->xcp[i].valid = false;
/* This is needed for figuring out memory id of xcp */
xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;
for (i = 0; i < num_xcps; ++i) {
for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
&ip);
if (ret)
continue;
__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
}
xcp_mgr->xcp[i].id = i;
if (xcp_mgr->funcs->get_xcp_mem_id) {
ret = xcp_mgr->funcs->get_xcp_mem_id(
xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
if (ret)
continue;
else
xcp_mgr->xcp[i].mem_id = mem_id;
}
}
xcp_mgr->num_xcps = num_xcps;
amdgpu_xcp_update_partition_sched_list(adev);
return 0;
}
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
int ret, curr_mode, num_xcps = 0;
if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
return -EINVAL;
if (xcp_mgr->mode == mode)
return 0;
if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
return 0;
mutex_lock(&xcp_mgr->xcp_lock);
curr_mode = xcp_mgr->mode;
/* State set to transient mode */
xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;
ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);
if (ret) {
/* Failed, get whatever mode it's at now */
if (xcp_mgr->funcs->query_partition_mode)
xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
xcp_mgr, AMDGPU_XCP_FL_LOCKED);
else
xcp_mgr->mode = curr_mode;
goto out;
}
out:
mutex_unlock(&xcp_mgr->xcp_lock);
return ret;
}
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
int mode;
if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
return xcp_mgr->mode;
if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
return xcp_mgr->mode;
if (!(flags & AMDGPU_XCP_FL_LOCKED))
mutex_lock(&xcp_mgr->xcp_lock);
mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
dev_WARN(
xcp_mgr->adev->dev,
"Cached partition mode %d not matching with device mode %d",
xcp_mgr->mode, mode);
if (!(flags & AMDGPU_XCP_FL_LOCKED))
mutex_unlock(&xcp_mgr->xcp_lock);
return mode;
}
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
struct drm_device *p_ddev;
struct drm_device *ddev;
int i, ret;
ddev = adev_to_drm(adev);
/* xcp #0 shares drm device setting with adev */
adev->xcp_mgr->xcp->ddev = ddev;
for (i = 1; i < MAX_XCP; i++) {
ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
if (ret == -ENOSPC) {
dev_warn(adev->dev,
"Skip xcp node #%d when out of drm node resource.", i);
return 0;
} else if (ret) {
return ret;
}
/* Redirect all IOCTLs to the primary device */
adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
p_ddev->render->dev = ddev;
p_ddev->primary->dev = ddev;
p_ddev->vma_offset_manager = ddev->vma_offset_manager;
p_ddev->driver = &amdgpu_partition_driver;
adev->xcp_mgr->xcp[i].ddev = p_ddev;
}
return 0;
}
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
int init_num_xcps,
struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
struct amdgpu_xcp_mgr *xcp_mgr;
if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
!xcp_funcs->get_ip_details)
return -EINVAL;
xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
if (!xcp_mgr)
return -ENOMEM;
xcp_mgr->adev = adev;
xcp_mgr->funcs = xcp_funcs;
xcp_mgr->mode = init_mode;
mutex_init(&xcp_mgr->xcp_lock);
if (init_mode != AMDGPU_XCP_MODE_NONE)
amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
adev->xcp_mgr = xcp_mgr;
return amdgpu_xcp_dev_alloc(adev);
}
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
struct amdgpu_xcp *xcp;
int i, id_mask = 0;
if (ip >= AMDGPU_XCP_MAX_BLOCKS)
return -EINVAL;
for (i = 0; i < xcp_mgr->num_xcps; ++i) {
xcp = &xcp_mgr->xcp[i];
if ((xcp->valid) && (xcp->ip[ip].valid) &&
(xcp->ip[ip].inst_mask & BIT(instance)))
id_mask |= BIT(i);
}
if (!id_mask)
id_mask = -ENXIO;
return id_mask;
}
int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
enum AMDGPU_XCP_IP_BLOCK ip,
uint32_t *inst_mask)
{
if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
return -EINVAL;
*inst_mask = xcp->ip[ip].inst_mask;
return 0;
}
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
const struct pci_device_id *ent)
{
int i, ret;
if (!adev->xcp_mgr)
return 0;
for (i = 1; i < MAX_XCP; i++) {
if (!adev->xcp_mgr->xcp[i].ddev)
break;
ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
if (ret)
return ret;
}
return 0;
}
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
struct drm_device *p_ddev;
int i;
if (!adev->xcp_mgr)
return;
for (i = 1; i < MAX_XCP; i++) {
if (!adev->xcp_mgr->xcp[i].ddev)
break;
p_ddev = adev->xcp_mgr->xcp[i].ddev;
drm_dev_unplug(p_ddev);
p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
}
}
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *file_priv)
{
int i;
if (!adev->xcp_mgr)
return 0;
fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
for (i = 0; i < MAX_XCP; ++i) {
if (!adev->xcp_mgr->xcp[i].ddev)
break;
if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
if (!adev->xcp_mgr->xcp[i].valid) {
dev_err(adev->dev, "renderD%d partition %d not valid!",
file_priv->minor->index, i);
return -ENOENT;
}
dev_dbg(adev->dev, "renderD%d partition %d opened!",
file_priv->minor->index, i);
fpriv->xcp_id = i;
break;
}
}
fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
return 0;
}
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity)
{
struct drm_gpu_scheduler *sched;
struct amdgpu_ring *ring;
if (!adev->xcp_mgr)
return;
sched = entity->entity.rq->sched;
if (sched->ready) {
ring = to_amdgpu_ring(entity->entity.rq->sched);
atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
}
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c |
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Jerome Glisse <[email protected]>
* Dave Airlie
*/
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
/*
* Fences mark an event in the GPUs pipeline and are used
* for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence
* are no longer in use by the associated ring on the GPU and
* that the relevant GPU caches have been flushed.
*/
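/*
 * Illustrative sketch only: the typical driver-internal use of the fence
 * helpers below, with error handling omitted and a zero flags value used
 * purely as an example.
 *
 *	struct dma_fence *fence;
 *
 *	amdgpu_fence_emit(ring, &fence, NULL, 0);	// emit behind the commands
 *	...
 *	dma_fence_wait(fence, false);			// CPU-side wait
 *	dma_fence_put(fence);
 */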
struct amdgpu_fence {
struct dma_fence base;
/* RB, DMA, etc. */
struct amdgpu_ring *ring;
ktime_t start_timestamp;
};
static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
{
amdgpu_fence_slab = kmem_cache_create(
"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!amdgpu_fence_slab)
return -ENOMEM;
return 0;
}
void amdgpu_fence_slab_fini(void)
{
rcu_barrier();
kmem_cache_destroy(amdgpu_fence_slab);
}
/*
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
if (__f->base.ops == &amdgpu_fence_ops ||
__f->base.ops == &amdgpu_job_fence_ops)
return __f;
return NULL;
}
/**
* amdgpu_fence_write - write a fence value
*
* @ring: ring the fence is associated with
* @seq: sequence number to write
*
* Writes a fence value to memory (all asics).
*/
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
if (drv->cpu_addr)
*drv->cpu_addr = cpu_to_le32(seq);
}
/**
* amdgpu_fence_read - read a fence value
*
* @ring: ring the fence is associated with
*
* Reads a fence value from memory (all asics).
* Returns the value of the fence read from memory.
*/
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
u32 seq = 0;
if (drv->cpu_addr)
seq = le32_to_cpu(*drv->cpu_addr);
else
seq = atomic_read(&drv->last_seq);
return seq;
}
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
* @f: resulting fence object
* @job: job the fence is embedded in
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
if (job == NULL) {
/* create a separate hw fence */
am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
if (am_fence == NULL)
return -ENOMEM;
fence = &am_fence->base;
am_fence->ring = ring;
} else {
/* make use of the job-embedded fence */
fence = &job->hw_fence;
}
seq = ++ring->fence_drv.sync_seq;
if (job && job->job_run_counter) {
/* reinit seq for resubmitted jobs */
fence->seqno = seq;
/* To be in line with external fence creation and other drivers */
dma_fence_get(fence);
} else {
if (job) {
dma_fence_init(fence, &amdgpu_job_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
/* Against remove in amdgpu_job_{free, free_cb} */
dma_fence_get(fence);
} else {
dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
}
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
rcu_read_lock();
old = dma_fence_get_rcu_safe(ptr);
rcu_read_unlock();
if (old) {
r = dma_fence_wait(old, false);
dma_fence_put(old);
if (r)
return r;
}
}
to_amdgpu_fence(fence)->start_timestamp = ktime_get();
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
rcu_assign_pointer(*ptr, dma_fence_get(fence));
*f = fence;
return 0;
}
/**
* amdgpu_fence_emit_polling - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
* @s: resulting sequence number
* @timeout: the timeout for waiting in usecs
*
* Emits a fence command on the requested ring (all asics).
* Used For polling fence.
* Returns 0 on success, a negative error code on failure.
*/
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout)
{
uint32_t seq;
signed long r;
if (!s)
return -EINVAL;
seq = ++ring->fence_drv.sync_seq;
r = amdgpu_fence_wait_polling(ring,
seq - ring->fence_drv.num_fences_mask,
timeout);
if (r < 1)
return -ETIMEDOUT;
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, 0);
*s = seq;
return 0;
}
/**
* amdgpu_fence_schedule_fallback - schedule fallback check
*
* @ring: pointer to struct amdgpu_ring
*
* Start a timer as fallback to our interrupts.
*/
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
mod_timer(&ring->fence_drv.fallback_timer,
jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
* amdgpu_fence_process - check for fence activity
*
* @ring: pointer to struct amdgpu_ring
*
* Checks the current fence value and calculates the last
* signalled fence value. Wakes the fence queue if the
* sequence number has increased.
*
* Returns true if fence was processed
*/
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
do {
last_seq = atomic_read(&ring->fence_drv.last_seq);
seq = amdgpu_fence_read(ring);
} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
if (del_timer(&ring->fence_drv.fallback_timer) &&
seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
if (unlikely(seq == last_seq))
return false;
last_seq &= drv->num_fences_mask;
seq &= drv->num_fences_mask;
do {
struct dma_fence *fence, **ptr;
++last_seq;
last_seq &= drv->num_fences_mask;
ptr = &drv->fences[last_seq];
/* There is always exactly one thread signaling this fence slot */
fence = rcu_dereference_protected(*ptr, 1);
RCU_INIT_POINTER(*ptr, NULL);
if (!fence)
continue;
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
return true;
}
/**
* amdgpu_fence_fallback - fallback for hardware interrupts
*
* @t: timer context used to obtain the pointer to ring structure
*
* Checks for fence activity.
*/
static void amdgpu_fence_fallback(struct timer_list *t)
{
struct amdgpu_ring *ring = from_timer(ring, t,
fence_drv.fallback_timer);
if (amdgpu_fence_process(ring))
DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
/**
* amdgpu_fence_wait_empty - wait for all fences to signal
*
* @ring: ring index the fence is associated with
*
* Wait for all fences on the requested ring to signal (all asics).
* Returns 0 if the fences have passed, error for all other cases.
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
if (!seq)
return 0;
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
rcu_read_lock();
fence = rcu_dereference(*ptr);
if (!fence || !dma_fence_get_rcu(fence)) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
r = dma_fence_wait(fence, false);
dma_fence_put(fence);
return r;
}
/**
* amdgpu_fence_wait_polling - busy wait for a given sequence number
*
* @ring: ring index the fence is associated with
* @wait_seq: sequence number to wait for
* @timeout: the timeout for waiting in usecs
*
* Busy-wait until the given sequence number has signaled on the requested
* ring (all asics).
* Returns the remaining time if the wait succeeded, 0 on timeout.
*/
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
signed long timeout)
{
while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
udelay(2);
timeout -= 2;
}
return timeout > 0 ? timeout : 0;
}
/**
* amdgpu_fence_count_emitted - get the count of emitted fences
*
* @ring: ring the fence is associated with
*
* Get the number of fences emitted on the requested ring (all asics).
* Returns the number of emitted fences on the ring. Used by the
* dynpm code to track ring activity.
*/
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
uint64_t emitted;
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
/**
* amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
* @ring: ring the fence is associated with
*
* Find the earliest fence that is still unsignaled and calculate the time
* delta between when that fence was emitted and now.
*/
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct dma_fence *fence;
uint32_t last_seq, sync_seq;
last_seq = atomic_read(&ring->fence_drv.last_seq);
sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
if (last_seq == sync_seq)
return 0;
++last_seq;
last_seq &= drv->num_fences_mask;
fence = drv->fences[last_seq];
if (!fence)
return 0;
return ktime_us_delta(ktime_get(),
to_amdgpu_fence(fence)->start_timestamp);
}
/**
* amdgpu_fence_update_start_timestamp - update the timestamp of the fence
* @ring: ring the fence is associated with
* @seq: the fence seq number to update.
* @timestamp: the start timestamp to update.
*
* This function is called when the fence and the related ib are about to be
* resubmitted to the GPU in the MCBP scenario. Thus we do not consider a
* race condition with amdgpu_fence_process modifying the same fence.
*/
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct dma_fence *fence;
seq &= drv->num_fences_mask;
fence = drv->fences[seq];
if (!fence)
return;
to_amdgpu_fence(fence)->start_timestamp = timestamp;
}
/**
* amdgpu_fence_driver_start_ring - make the fence driver
* ready for use on the requested ring.
*
* @ring: ring to start the fence driver on
* @irq_src: interrupt source to use for this ring
* @irq_type: interrupt type to use for this ring
*
* Make the fence driver ready for processing (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has.
* Returns 0 for success, errors for failure.
*/
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned int irq_type)
{
struct amdgpu_device *adev = ring->adev;
uint64_t index;
if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
} else {
/* put fence directly behind firmware */
index = ALIGN(adev->uvd.fw->size, 8);
ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
}
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
ring->fence_drv.irq_src = irq_src;
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
ring->name, ring->fence_drv.gpu_addr);
return 0;
}
/**
* amdgpu_fence_driver_init_ring - init the fence driver
* for the requested ring.
*
* @ring: ring to init the fence driver on
*
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
*/
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (!adev)
return -EINVAL;
if (!is_power_of_2(ring->num_hw_submission))
return -EINVAL;
ring->fence_drv.cpu_addr = NULL;
ring->fence_drv.gpu_addr = 0;
ring->fence_drv.sync_seq = 0;
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
GFP_KERNEL);
if (!ring->fence_drv.fences)
return -ENOMEM;
return 0;
}
/**
* amdgpu_fence_driver_sw_init - init the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Init the fence driver for all possible rings (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* amdgpu_fence_driver_start_ring().
* Returns 0 for success.
*/
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
return 0;
}
/**
* amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
* fence driver interrupts need to be restored.
*
* @ring: ring that to be checked
*
* Interrupts for rings that belong to GFX IP don't need to be restored
* when the target power state is s0ix.
*
* Return true if need to restore interrupts, false otherwise.
*/
static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool is_gfx_power_domain = false;
switch (ring->funcs->type) {
case AMDGPU_RING_TYPE_SDMA:
/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
is_gfx_power_domain = true;
break;
case AMDGPU_RING_TYPE_GFX:
case AMDGPU_RING_TYPE_COMPUTE:
case AMDGPU_RING_TYPE_KIQ:
case AMDGPU_RING_TYPE_MES:
is_gfx_power_domain = true;
break;
default:
break;
}
return !(adev->in_s0ix && is_gfx_power_domain);
}
/**
* amdgpu_fence_driver_hw_fini - tear down the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Tear down the fence driver for all possible rings (all asics).
*/
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
/* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(adev_to_drm(adev)))
r = amdgpu_fence_wait_empty(ring);
else
r = -ENODEV;
/* no need to trigger GPU reset as we are unloading */
if (r)
amdgpu_fence_driver_force_completion(ring);
if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
ring->fence_drv.irq_src &&
amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
del_timer_sync(&ring->fence_drv.fallback_timer);
}
}
/* Either stop and flush the handlers for the amdgpu interrupt, or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
continue;
if (stop)
disable_irq(adev->irq.irq);
else
enable_irq(adev->irq.irq);
}
}
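/**
 * amdgpu_fence_driver_sw_fini - tear down the software state of the fence
 * driver for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the scheduler and drop the remaining fence references for
 * every ring that was initialized (all asics).
 */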
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
unsigned int i, j;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
/*
* Notice we check for sched.ops since there's some
* override on the meaning of sched.ready by amdgpu.
* The natural check would be sched.ready, which is
* set as drm_sched_init() finishes...
*/
if (ring->sched.ops)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.fences = NULL;
ring->fence_drv.initialized = false;
}
}
/**
* amdgpu_fence_driver_hw_init - enable the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Enable the fence driver for all possible rings (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* amdgpu_fence_driver_start_ring().
*/
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
/* enable the interrupt */
if (ring->fence_drv.irq_src &&
amdgpu_fence_need_ring_interrupt_restore(ring))
amdgpu_irq_get(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
}
/**
* amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
*
 * @ring: ring whose job embedded fences should be cleared
*
*/
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
int i;
struct dma_fence *old, **ptr;
for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
ptr = &ring->fence_drv.fences[i];
old = rcu_dereference_protected(*ptr, 1);
if (old && old->ops == &amdgpu_job_fence_ops) {
struct amdgpu_job *job;
			/* For a non-scheduler bad job, i.e. a failed ib test, we need to
			 * signal it right here or we won't be able to track it in fence_drv
			 * and it will remain unsignaled during sa_bo free.
*/
job = container_of(old, struct amdgpu_job, hw_fence);
if (!job->base.s_fence && !dma_fence_is_signaled(old))
dma_fence_signal(old);
RCU_INIT_POINTER(*ptr, NULL);
dma_fence_put(old);
}
}
}
/**
* amdgpu_fence_driver_set_error - set error code on fences
* @ring: the ring which contains the fences
* @error: the error code to set
*
* Set an error code to all the fences pending on the ring.
*/
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
unsigned long flags;
spin_lock_irqsave(&drv->lock, flags);
for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
struct dma_fence *fence;
fence = rcu_dereference_protected(drv->fences[i],
lockdep_is_held(&drv->lock));
if (fence && !dma_fence_is_signaled_locked(fence))
dma_fence_set_error(fence, error);
}
spin_unlock_irqrestore(&drv->lock, flags);
}
/**
* amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
 * @ring: ring whose latest fence should be force signaled
*
*/
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
amdgpu_fence_driver_set_error(ring, -ECANCELED);
amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
amdgpu_fence_process(ring);
}
/*
* Common fence implementation
*/
static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
return "amdgpu";
}
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
return (const char *)to_amdgpu_fence(f)->ring->name;
}
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
/**
* amdgpu_fence_enable_signaling - enable signalling on fence
* @f: fence
*
 * This function is called with the fence lock held; it arms the fallback
 * timer so that the fence is eventually processed even if no further
 * interrupts arrive.
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
return true;
}
/**
* amdgpu_job_fence_enable_signaling - enable signalling on job fence
* @f: fence
*
 * This is similar to amdgpu_fence_enable_signaling above, but it only
 * handles the job embedded fence.
*/
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
return true;
}
/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
*
* Free up the fence memory after the RCU grace period.
*/
static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	/* free the fence from the slab if it is a standalone (non job embedded) fence */
kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}
/**
* amdgpu_job_fence_free - free up the job with embedded fence
*
* @rcu: RCU callback head
*
* Free up the job with embedded fence after the RCU grace period.
*/
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
/* free job if fence has a parent job */
kfree(container_of(f, struct amdgpu_job, hw_fence));
}
/**
* amdgpu_fence_release - callback that fence can be freed
*
* @f: fence
*
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
static void amdgpu_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_fence_free);
}
/**
* amdgpu_job_fence_release - callback that job embedded fence can be freed
*
* @f: fence
*
 * This is similar to amdgpu_fence_release above, but it only
 * handles the job embedded fence.
*/
static void amdgpu_job_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_job_fence_free);
}
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
.release = amdgpu_fence_release,
};
static const struct dma_fence_ops amdgpu_job_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_job_fence_get_timeline_name,
.enable_signaling = amdgpu_job_fence_enable_signaling,
.release = amdgpu_job_fence_release,
};
/*
* Fence debugfs
*/
#if defined(CONFIG_DEBUG_FS)
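/* Show per-ring fence state: last signaled/emitted sequence numbers and,
 * where available, the trailing, preemption and reset fence values.
 */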
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = m->private;
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
amdgpu_fence_process(ring);
seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
seq_printf(m, "Last signaled fence 0x%08x\n",
atomic_read(&ring->fence_drv.last_seq));
seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
seq_printf(m, "Last signaled trailing fence 0x%08x\n",
le32_to_cpu(*ring->trail_fence_cpu_addr));
seq_printf(m, "Last emitted 0x%08x\n",
ring->trail_seq);
}
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
continue;
/* set in CP_VMID_PREEMPT and preemption occurred */
seq_printf(m, "Last preempted 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
/* set in CP_VMID_RESET and reset occurred */
seq_printf(m, "Last reset 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
/* Both preemption and reset occurred */
seq_printf(m, "Last both 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
}
return 0;
}
/*
* amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
*
 * Reading this file schedules a gpu reset, waits for it to finish and
 * returns the recovery result.
*/
static int gpu_recover_get(void *data, u64 *val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
struct drm_device *dev = adev_to_drm(adev);
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
pm_runtime_put_autosuspend(dev->dev);
return 0;
}
if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
flush_work(&adev->reset_work);
*val = atomic_read(&adev->reset_domain->reset_res);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
"%lld\n");
static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
reset_work);
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
#endif
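/**
 * amdgpu_debugfs_fence_init - register fence related debugfs files
 *
 * @adev: amdgpu device pointer
 *
 * Create the amdgpu_fence_info file and, on bare metal, the
 * amdgpu_gpu_recover file under the DRM debugfs root.
 */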
void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
&amdgpu_debugfs_fence_info_fops);
if (!amdgpu_sriov_vf(adev)) {
INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
&amdgpu_debugfs_gpu_recover_fops);
}
#endif
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
/**
* jpeg_v2_0_early_init - set function pointers
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
*/
static int jpeg_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
jpeg_v2_0_set_dec_ring_funcs(adev);
jpeg_v2_0_set_irq_funcs(adev);
return 0;
}
/**
* jpeg_v2_0_sw_init - sw init for JPEG block
*
* @handle: amdgpu_device pointer
*
* Load firmware and sw initialization
*/
static int jpeg_v2_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int r;
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
if (r)
return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
ring = adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
return 0;
}
/**
* jpeg_v2_0_sw_fini - sw fini for JPEG block
*
* @handle: amdgpu_device pointer
*
* JPEG suspend and free up sw allocation
*/
static int jpeg_v2_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
r = amdgpu_jpeg_sw_fini(adev);
return r;
}
/**
* jpeg_v2_0_hw_init - start and test JPEG block
*
* @handle: amdgpu_device pointer
 *
 * Program the doorbell range and test the JPEG decode ring.
 */
static int jpeg_v2_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
r = amdgpu_ring_test_helper(ring);
if (!r)
DRM_INFO("JPEG decode initialized successfully.\n");
return r;
}
/**
* jpeg_v2_0_hw_fini - stop the hardware block
*
* @handle: amdgpu_device pointer
*
* Stop the JPEG block, mark ring as not ready any more
*/
static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
return 0;
}
/**
* jpeg_v2_0_suspend - suspend JPEG block
*
* @handle: amdgpu_device pointer
*
* HW fini and suspend JPEG block
*/
static int jpeg_v2_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = jpeg_v2_0_hw_fini(adev);
if (r)
return r;
r = amdgpu_jpeg_suspend(adev);
return r;
}
/**
* jpeg_v2_0_resume - resume JPEG block
*
* @handle: amdgpu_device pointer
*
* Resume firmware and hw init JPEG block
*/
static int jpeg_v2_0_resume(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
r = jpeg_v2_0_hw_init(adev);
return r;
}
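/* Power up the UVDJ tile via PGFSM and clear the anti-hang bit so the JPEG block can run */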
static int jpeg_v2_0_disable_power_gating(struct amdgpu_device *adev)
{
uint32_t data;
int r = 0;
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
r = SOC15_WAIT_ON_RREG(JPEG, 0,
mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
if (r) {
DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
return r;
}
}
	/* Remove the anti-hang mechanism to indicate the UVDJ tile is ON */
data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS)) & ~0x1;
WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
return 0;
}
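/* Mark the UVDJ tiles as off and power gate the JPEG block via PGFSM */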
static int jpeg_v2_0_enable_power_gating(struct amdgpu_device *adev)
{
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
uint32_t data;
int r = 0;
data = RREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS));
data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
data |= 0x1; //UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_TILES_OFF;
WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), data);
data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
if (r) {
DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
return r;
}
}
return 0;
}
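/* Ungate the JPEG clocks (decoder, encoder, JMCIF, JRBBM) before starting the block */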
static void jpeg_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
| JPEG_CGC_GATE__JPEG_ENC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
}
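/* Gate the JPEG sub-block clocks again when the block is idle or being stopped */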
static void jpeg_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|JPEG_CGC_GATE__JPEG_ENC_MASK
|JPEG_CGC_GATE__JMCIF_MASK
|JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
}
/**
* jpeg_v2_0_start - start JPEG block
*
* @adev: amdgpu_device pointer
*
* Setup and start the JPEG block
*/
static int jpeg_v2_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
/* disable power gating */
r = jpeg_v2_0_disable_power_gating(adev);
if (r)
return r;
/* JPEG disable CGC */
jpeg_v2_0_disable_clock_gating(adev);
WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
/* enable JMI channel */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable System Interrupt for JRBC */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
JPEG_SYS_INT_EN__DJRBC_MASK,
~JPEG_SYS_INT_EN__DJRBC_MASK);
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
return 0;
}
/**
* jpeg_v2_0_stop - stop JPEG block
*
* @adev: amdgpu_device pointer
*
* stop the JPEG block
*/
static int jpeg_v2_0_stop(struct amdgpu_device *adev)
{
int r;
/* reset JMI */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
UVD_JMI_CNTL__SOFT_RESET_MASK,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable JPEG CGC */
jpeg_v2_0_enable_clock_gating(adev);
/* enable power gating */
r = jpeg_v2_0_enable_power_gating(adev);
if (r)
return r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
return 0;
}
/**
* jpeg_v2_0_dec_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t jpeg_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
}
/**
* jpeg_v2_0_dec_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t jpeg_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
}
/**
* jpeg_v2_0_dec_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void jpeg_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
/**
* jpeg_v2_0_dec_ring_insert_start - insert a start command
*
* @ring: amdgpu_ring pointer
*
* Write a start command to the ring.
*/
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x68e04);
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x80010000);
}
/**
 * jpeg_v2_0_dec_ring_insert_end - insert an end command
*
* @ring: amdgpu_ring pointer
*
 * Write an end command to the ring.
*/
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x68e04);
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x00010000);
}
/**
 * jpeg_v2_0_dec_ring_emit_fence - emit a fence & trap command
*
* @ring: amdgpu_ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Write a fence and a trap command to the ring.
*/
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x8);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET,
0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x3fbc);
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x1);
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE7));
amdgpu_ring_write(ring, 0);
}
/**
* jpeg_v2_0_dec_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
* @job: job to retrieve vmid from
* @ib: indirect buffer to execute
* @flags: unused
*
* Write ring commands to execute the indirect buffer.
*/
void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, (vmid | (vmid << 4)));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
amdgpu_ring_write(ring, PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x01400200);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x2);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_STATUS_INTERNAL_OFFSET,
0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
amdgpu_ring_write(ring, 0x2);
}
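/**
 * jpeg_v2_0_dec_ring_emit_reg_wait - emit a register wait packet
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword aligned)
 * @val: reference value to wait for
 * @mask: mask applied to the register before the compare
 *
 * Emit ring packets that poll the register until (reg & mask) matches @val.
 */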
void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask)
{
uint32_t reg_offset = (reg << 2);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, 0x01400200);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
} else {
amdgpu_ring_write(ring, reg_offset);
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE3));
}
amdgpu_ring_write(ring, mask);
}
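/**
 * jpeg_v2_0_dec_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and wait until the page table base register
 * write has landed before continuing.
 */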
void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t data0, data1, mask;
pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for register write */
data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
jpeg_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
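/**
 * jpeg_v2_0_dec_ring_emit_wreg - emit a register write packet
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword aligned)
 * @val: value to write
 *
 * Emit ring packets that write @val to the given register.
 */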
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
uint32_t reg_offset = (reg << 2);
amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
0, 0, PACKETJ_TYPE0));
if (reg_offset >= 0x10000 && reg_offset <= 0x105ff) {
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
} else {
amdgpu_ring_write(ring, reg_offset);
amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
0, 0, PACKETJ_TYPE0));
}
amdgpu_ring_write(ring, val);
}
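/**
 * jpeg_v2_0_dec_ring_nop - insert NOP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to pad
 *
 * Pad the ring with TYPE6 NOP packets; they are emitted in pairs, so both
 * the write pointer and @count are expected to be even.
 */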
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
amdgpu_ring_write(ring, 0);
}
}
static bool jpeg_v2_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
static int jpeg_v2_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS, UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
return ret;
}
static int jpeg_v2_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
if (!jpeg_v2_0_is_idle(handle))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
jpeg_v2_0_disable_clock_gating(adev);
}
return 0;
}
static int jpeg_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
if (state == adev->jpeg.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
ret = jpeg_v2_0_stop(adev);
else
ret = jpeg_v2_0_start(adev);
if (!ret)
adev->jpeg.cur_state = state;
return ret;
}
static int jpeg_v2_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: JPEG TRAP\n");
switch (entry->src_id) {
case VCN_2_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = {
.name = "jpeg_v2_0",
.early_init = jpeg_v2_0_early_init,
.late_init = NULL,
.sw_init = jpeg_v2_0_sw_init,
.sw_fini = jpeg_v2_0_sw_fini,
.hw_init = jpeg_v2_0_hw_init,
.hw_fini = jpeg_v2_0_hw_fini,
.suspend = jpeg_v2_0_suspend,
.resume = jpeg_v2_0_resume,
.is_idle = jpeg_v2_0_is_idle,
.wait_for_idle = jpeg_v2_0_wait_for_idle,
.check_soft_reset = NULL,
.pre_soft_reset = NULL,
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v2_0_set_clockgating_state,
.set_powergating_state = jpeg_v2_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */
18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */
8 + 16,
.emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */
.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v2_0_dec_ring_nop,
.insert_start = jpeg_v2_0_dec_ring_insert_start,
.insert_end = jpeg_v2_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_jpeg_ring_begin_use,
.end_use = amdgpu_jpeg_ring_end_use,
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v2_0_dec_ring_vm_funcs;
DRM_INFO("JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v2_0_irq_funcs = {
.set = jpeg_v2_0_set_interrupt_state,
.process = jpeg_v2_0_process_interrupt,
};
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v2_0_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 2,
.minor = 0,
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v4_3.h"
#include "nbio/nbio_4_3_0_offset.h"
#include "nbio/nbio_4_3_0_sh_mask.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
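/* Point the HDP mem/reg flush registers at the fixed MMIO remap window (used for KFD mappings) */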
static void nbio_v4_3_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
static u32 nbio_v4_3_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
return tmp;
}
static void nbio_v4_3_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
else
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
}
static u32 nbio_v4_3_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE);
}
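/* Route a doorbell aperture range to the SDMA engine; NBIO 4.3 only wires up instance 0 */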
static void nbio_v4_3_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index,
int doorbell_size)
{
if (instance == 0) {
u32 doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_2_CTRL,
S2A_DOORBELL_PORT2_ENABLE,
0x1);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_2_CTRL,
S2A_DOORBELL_PORT2_AWID,
0xe);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_2_CTRL,
S2A_DOORBELL_PORT2_RANGE_OFFSET,
doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_2_CTRL,
S2A_DOORBELL_PORT2_RANGE_SIZE,
doorbell_size);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_2_CTRL,
S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE,
0x3);
} else
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_2_CTRL,
S2A_DOORBELL_PORT2_RANGE_SIZE,
0);
WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL, doorbell_range);
}
}
static void nbio_v4_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
int doorbell_index, int instance)
{
u32 doorbell_range;
if (instance)
doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL);
else
doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_4_CTRL,
S2A_DOORBELL_PORT4_ENABLE,
0x1);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_4_CTRL,
S2A_DOORBELL_PORT4_AWID,
instance ? 0x7 : 0x4);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_4_CTRL,
S2A_DOORBELL_PORT4_RANGE_OFFSET,
doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_4_CTRL,
S2A_DOORBELL_PORT4_RANGE_SIZE,
8);
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_4_CTRL,
S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE,
instance ? 0x7 : 0x4);
} else
doorbell_range = REG_SET_FIELD(doorbell_range,
S2A_DOORBELL_ENTRY_4_CTRL,
S2A_DOORBELL_PORT4_RANGE_SIZE,
0);
if (instance)
WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL, doorbell_range);
else
WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL, doorbell_range);
}
static void nbio_v4_3_gc_doorbell_init(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_0_CTRL, 0x30000007);
WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_3_CTRL, 0x3000000d);
}
static void nbio_v4_3_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN,
BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
static void nbio_v4_3_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = 0;
if (enable) {
tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_EN, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_MODE, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_SIZE, 0);
WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
lower_32_bits(adev->doorbell.base));
WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
upper_32_bits(adev->doorbell.base));
}
WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
tmp);
}
static void nbio_v4_3_ih_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL);
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_ENABLE,
0x1);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWID,
0x0);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_OFFSET,
doorbell_index);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE,
2);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE,
0x0);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
S2A_DOORBELL_ENTRY_1_CTRL,
S2A_DOORBELL_PORT1_RANGE_SIZE,
0);
WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL, ih_doorbell_range);
}
static void nbio_v4_3_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
/* setup interrupt control */
WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL);
/*
* BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
IH_DUMMY_RD_OVERRIDE, 0);
/* BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL,
IH_REQ_NONSNOOP_EN, 0);
WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl);
}
static void nbio_v4_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
return;
def = data = RREG32_SOC15(NBIO, 0, regCPM_CONTROL);
if (enable) {
data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
} else {
data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
}
if (def != data)
WREG32_SOC15(NBIO, 0, regCPM_CONTROL, data);
}
static void nbio_v4_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
return;
/* TODO: need update in future */
def = data = RREG32_SOC15(NBIO, 0, regPCIE_CNTL2);
if (enable) {
data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
} else {
data &= ~PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
}
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_CNTL2, data);
}
static void nbio_v4_3_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
int data;
/* AMD_CG_SUPPORT_BIF_MGCG */
data = RREG32_SOC15(NBIO, 0, regCPM_CONTROL);
if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_BIF_MGCG;
/* AMD_CG_SUPPORT_BIF_LS */
data = RREG32_SOC15(NBIO, 0, regPCIE_CNTL2);
if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
static u32 nbio_v4_3_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}
static u32 nbio_v4_3_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}
static u32 nbio_v4_3_get_pcie_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
}
static u32 nbio_v4_3_get_pcie_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
}
const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
.ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
.ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
.ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
.ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
.ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
.ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
.ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
{
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
uint32_t data;
data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
}
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
{
u32 data, rom_offset;
data = RREG32_SOC15(NBIO, 0, regREGS_ROM_OFFSET_CTRL);
rom_offset = REG_GET_FIELD(data, REGS_ROM_OFFSET_CTRL, ROM_OFFSET);
return rom_offset;
}
#ifdef CONFIG_PCIEASPM
static void nbio_v4_3_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
def = RREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
data = 0x35EB;
data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2);
data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2, data);
def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
if (adev->pdev->ltr_path)
data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
else
data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
#endif
static void nbio_v4_3_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
uint32_t def, data;
if (!(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 4, 0)) &&
!(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 6, 0)))
return;
def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL, data);
def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL7);
data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL7, data);
def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3);
data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3, data);
def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);
def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);
def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
def = data = RREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2);
data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2, data);
def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL4);
data |= PCIE_LC_CNTL4__LC_L1_POWERDOWN_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL4, data);
def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL);
data |= PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL, data);
nbio_v4_3_program_ltr(adev);
def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
if (def != data)
WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data);
def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5);
data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
if (def != data)
WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data);
def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL);
data |= 0x0 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL, data);
def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3);
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3, data);
#endif
}
const struct amdgpu_nbio_funcs nbio_v4_3_funcs = {
.get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset,
.get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset,
.get_rev_id = nbio_v4_3_get_rev_id,
.mc_access_enable = nbio_v4_3_mc_access_enable,
.get_memsize = nbio_v4_3_get_memsize,
.sdma_doorbell_range = nbio_v4_3_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v4_3_vcn_doorbell_range,
.gc_doorbell_init = nbio_v4_3_gc_doorbell_init,
.enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v4_3_ih_doorbell_range,
.update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v4_3_get_clockgating_state,
.ih_control = nbio_v4_3_ih_control,
.init_registers = nbio_v4_3_init_registers,
.remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
.get_rom_offset = nbio_v4_3_get_rom_offset,
.program_aspm = nbio_v4_3_program_aspm,
};
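/* Under SR-IOV the host programs the doorbell apertures, so the VF
 * variants of these callbacks below are intentionally empty.
 */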
static void nbio_v4_3_sriov_ih_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell, int doorbell_index)
{
}
static void nbio_v4_3_sriov_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index,
int doorbell_size)
{
}
static void nbio_v4_3_sriov_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
int doorbell_index, int instance)
{
}
static void nbio_v4_3_sriov_gc_doorbell_init(struct amdgpu_device *adev)
{
}
const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = {
.get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset,
.get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset,
.get_rev_id = nbio_v4_3_get_rev_id,
.mc_access_enable = nbio_v4_3_mc_access_enable,
.get_memsize = nbio_v4_3_get_memsize,
.sdma_doorbell_range = nbio_v4_3_sriov_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v4_3_sriov_vcn_doorbell_range,
.gc_doorbell_init = nbio_v4_3_sriov_gc_doorbell_init,
.enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v4_3_sriov_ih_doorbell_range,
.update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v4_3_get_clockgating_state,
.ih_control = nbio_v4_3_ih_control,
.init_registers = nbio_v4_3_init_registers,
.remap_hdp_registers = nbio_v4_3_remap_hdp_registers,
.get_rom_offset = nbio_v4_3_get_rom_offset,
};
static int nbio_v4_3_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
	/* The ras_controller_irq enablement should be done in psp bl when it
	 * tries to enable the ras feature. The driver only needs to set the
	 * correct interrupt vector for the bare-metal and sriov use cases
	 * respectively.
	 */
uint32_t bif_doorbell_int_cntl;
bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
BIF_BX0_BIF_DOORBELL_INT_CNTL,
RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE,
(state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1);
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
return 0;
}
static int nbio_v4_3_process_err_event_athub_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the bif ring. Since the bif ring is not enabled, just leave the
	 * process callback as a dummy one.
	 */
return 0;
}
static const struct amdgpu_irq_src_funcs nbio_v4_3_ras_err_event_athub_irq_funcs = {
.set = nbio_v4_3_set_ras_err_event_athub_irq_state,
.process = nbio_v4_3_process_err_event_athub_irq,
};
static void nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
uint32_t bif_doorbell_int_cntl;
bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_int_cntl,
BIF_DOORBELL_INT_CNTL,
RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
/* driver has to clear the interrupt status when bif ring is disabled */
bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
BIF_DOORBELL_INT_CNTL,
RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl);
amdgpu_ras_global_ras_isr(adev);
}
}
static int nbio_v4_3_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
int r;
/* init the irq funcs */
adev->nbio.ras_err_event_athub_irq.funcs =
&nbio_v4_3_ras_err_event_athub_irq_funcs;
adev->nbio.ras_err_event_athub_irq.num_types = 1;
/* register ras err event athub interrupt
* nbio v4_3 uses the same irq source as nbio v7_4 */
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
&adev->nbio.ras_err_event_athub_irq);
return r;
}
struct amdgpu_nbio_ras nbio_v4_3_ras = {
.handle_ras_err_event_athub_intr_no_bifring = nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring,
.init_ras_err_event_athub_interrupt = nbio_v4_3_init_ras_err_event_athub_interrupt,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c |
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "atombios_dp.h"
#include "amdgpu_connectors.h"
#include "amdgpu_i2c.h"
#include "amdgpu_display.h"
#include <linux/pm_runtime.h>
void amdgpu_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* bail if the connector does not have hpd pin, e.g.,
* VGA, TV, etc.
*/
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE)
return;
amdgpu_display_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
/* if the connector is already off, don't turn it back on */
if (connector->dpms != DRM_MODE_DPMS_ON)
return;
/* just deal with DP (not eDP) here. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct amdgpu_connector_atom_dig *dig_connector =
amdgpu_connector->con_priv;
/* if existing sink type was not DP no need to retrain */
if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
return;
/* first get sink type as it may be reset after (un)plug */
dig_connector->dp_sink_type = amdgpu_atombios_dp_get_sinktype(amdgpu_connector);
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't start link training before we have the DPCD */
if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return;
/* Turn the connector off and back on immediately, which
* will trigger link training
*/
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
static void amdgpu_connector_property_change_mode(struct drm_encoder *encoder)
{
struct drm_crtc *crtc = encoder->crtc;
if (crtc && crtc->enabled) {
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->primary->fb);
}
}
int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
int bpc = 8;
unsigned int mode_clock, max_tmds_clock;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (amdgpu_connector->use_digital) {
if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
}
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = amdgpu_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
case DRM_MODE_CONNECTOR_eDP:
case DRM_MODE_CONNECTOR_LVDS:
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
else {
const struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
bpc = 6;
else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
bpc = 8;
}
break;
}
if (connector->display_info.is_hdmi) {
/*
* Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
* much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
* 12 bpc is always supported on hdmi deep color sinks, as this is
* required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
*/
if (bpc > 12) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
connector->name, bpc);
bpc = 12;
}
/* Any defined maximum tmds clock limit we must not exceed? */
if (connector->display_info.max_tmds_clock > 0) {
/* mode_clock is clock in kHz for mode to be modeset on this connector */
mode_clock = amdgpu_connector->pixelclock_for_modeset;
/* Maximum allowable input clock in kHz */
max_tmds_clock = connector->display_info.max_tmds_clock;
DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
connector->name, mode_clock, max_tmds_clock);
/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
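/* 12 bpc needs 1.5x and 10 bpc needs 1.25x the 8 bpc TMDS clock */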
if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) &&
(mode_clock * 5/4 <= max_tmds_clock))
bpc = 10;
else
bpc = 8;
DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
}
if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
bpc = 8;
DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
}
} else if (bpc > 8) {
/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
connector->name);
bpc = 8;
}
}
if ((amdgpu_deep_color == 0) && (bpc > 8)) {
DRM_DEBUG("%s: Deep color disabled. Set amdgpu module param deep_color=1 to enable.\n",
connector->name);
bpc = 8;
}
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
return bpc;
}
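/* Reflect the detected connection status in the BIOS scratch registers
 * for every encoder that can drive this connector.
 */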
static void
amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
enum drm_connector_status status)
{
struct drm_encoder *best_encoder;
struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
best_encoder = connector_funcs->best_encoder(connector);
drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
connected = false;
amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
}
}
static struct drm_encoder *
amdgpu_connector_find_encoder(struct drm_connector *connector,
int encoder_type)
{
struct drm_encoder *encoder;
drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
return NULL;
}
struct edid *amdgpu_connector_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
if (amdgpu_connector->edid) {
return amdgpu_connector->edid;
} else if (edid_blob) {
struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
if (edid)
amdgpu_connector->edid = edid;
}
return amdgpu_connector->edid;
}
static struct edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{
struct edid *edid;
if (adev->mode_info.bios_hardcoded_edid) {
edid = kmalloc(adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
if (edid) {
memcpy((unsigned char *)edid,
(unsigned char *)adev->mode_info.bios_hardcoded_edid,
adev->mode_info.bios_hardcoded_edid_size);
return edid;
}
}
return NULL;
}
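/* Fetch the EDID over DP AUX or the i2c DDC bus depending on the
 * connector/sink type, falling back to a VBIOS hardcoded EDID for
 * LVDS/eDP panels that do not provide one.
 */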
static void amdgpu_connector_get_edid(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->edid)
return;
/* on hw with routers, select right port */
if (amdgpu_connector->router.ddc_valid)
amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
if ((amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) &&
amdgpu_connector->ddc_bus->has_aux) {
amdgpu_connector->edid = drm_get_edid(connector,
&amdgpu_connector->ddc_bus->aux.ddc);
} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
amdgpu_connector->ddc_bus->has_aux)
amdgpu_connector->edid = drm_get_edid(connector,
&amdgpu_connector->ddc_bus->aux.ddc);
else if (amdgpu_connector->ddc_bus)
amdgpu_connector->edid = drm_get_edid(connector,
&amdgpu_connector->ddc_bus->adapter);
} else if (amdgpu_connector->ddc_bus) {
amdgpu_connector->edid = drm_get_edid(connector,
&amdgpu_connector->ddc_bus->adapter);
}
if (!amdgpu_connector->edid) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP))) {
amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
drm_connector_update_edid_property(connector, amdgpu_connector->edid);
}
}
}
static void amdgpu_connector_free_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
kfree(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
int ret;
if (amdgpu_connector->edid) {
drm_connector_update_edid_property(connector, amdgpu_connector->edid);
ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
return ret;
}
drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
struct drm_encoder *encoder;
/* pick the first one */
drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
}
static void amdgpu_get_native_mode(struct drm_connector *connector)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
struct amdgpu_encoder *amdgpu_encoder;
if (encoder == NULL)
return;
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (!list_empty(&connector->probed_modes)) {
struct drm_display_mode *preferred_mode =
list_first_entry(&connector->probed_modes,
struct drm_display_mode, head);
amdgpu_encoder->native_mode = *preferred_mode;
} else {
amdgpu_encoder->native_mode.clock = 0;
}
}
static struct drm_display_mode *
amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
if (native_mode->hdisplay != 0 &&
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
if (!mode)
return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
} else if (native_mode->hdisplay != 0 &&
native_mode->vdisplay != 0) {
/* mac laptops without an edid */
/* Note that this is not necessarily the exact panel mode,
* but an approximation based on the cvt formula. For these
* systems we should ideally read the mode info out of the
* registers or add a mode table, but this works and is much
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
if (!mode)
return NULL;
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
return mode;
}
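/* Add a set of common modes the scaler can produce, skipping anything
 * larger than the native panel mode (or 1024x768 for TV) and anything
 * below 320x200.
 */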
static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
static const struct mode_size {
int w;
int h;
} common_modes[17] = {
{ 640, 480},
{ 720, 480},
{ 800, 600},
{ 848, 480},
{1024, 768},
{1152, 768},
{1280, 720},
{1280, 800},
{1280, 854},
{1280, 960},
{1280, 1024},
{1440, 900},
{1400, 1050},
{1680, 1050},
{1600, 1200},
{1920, 1080},
{1920, 1200}
};
for (i = 0; i < 17; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
continue;
}
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (common_modes[i].w > native_mode->hdisplay ||
common_modes[i].h > native_mode->vdisplay ||
(common_modes[i].w == native_mode->hdisplay &&
common_modes[i].h == native_mode->vdisplay))
continue;
}
if (common_modes[i].w < 320 || common_modes[i].h < 200)
continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
drm_mode_probed_add(connector, mode);
}
}
static int amdgpu_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
if (property == adev->mode_info.coherent_mode_property) {
struct amdgpu_encoder_atom_dig *dig;
bool new_coherent_mode;
/* need to find digital encoder on connector */
encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (!amdgpu_encoder->enc_priv)
return 0;
dig = amdgpu_encoder->enc_priv;
new_coherent_mode = val ? true : false;
if (dig->coherent_mode != new_coherent_mode) {
dig->coherent_mode = new_coherent_mode;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
}
}
if (property == adev->mode_info.audio_property) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* need to find digital encoder on connector */
encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_connector->audio != val) {
amdgpu_connector->audio = val;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
}
}
if (property == adev->mode_info.dither_property) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* need to find digital encoder on connector */
encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_connector->dither != val) {
amdgpu_connector->dither = val;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
}
}
if (property == adev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->underscan_type != val) {
amdgpu_encoder->underscan_type = val;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
}
}
if (property == adev->mode_info.underscan_hborder_property) {
/* need to find digital encoder on connector */
encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->underscan_hborder != val) {
amdgpu_encoder->underscan_hborder = val;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
}
}
if (property == adev->mode_info.underscan_vborder_property) {
/* need to find digital encoder on connector */
encoder = amdgpu_connector_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->underscan_vborder != val) {
amdgpu_encoder->underscan_vborder = val;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
}
}
if (property == adev->mode_info.load_detect_property) {
struct amdgpu_connector *amdgpu_connector =
to_amdgpu_connector(connector);
if (val == 0)
amdgpu_connector->dac_load_detect = false;
else
amdgpu_connector->dac_load_detect = true;
}
if (property == dev->mode_config.scaling_mode_property) {
enum amdgpu_rmx_type rmx_type;
if (connector->encoder) {
amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
} else {
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
}
switch (val) {
default:
case DRM_MODE_SCALE_NONE:
rmx_type = RMX_OFF;
break;
case DRM_MODE_SCALE_CENTER:
rmx_type = RMX_CENTER;
break;
case DRM_MODE_SCALE_ASPECT:
rmx_type = RMX_ASPECT;
break;
case DRM_MODE_SCALE_FULLSCREEN:
rmx_type = RMX_FULL;
break;
}
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
if ((rmx_type != RMX_OFF) &&
(amdgpu_encoder->native_mode.clock == 0))
return 0;
amdgpu_encoder->rmx_type = rmx_type;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
}
return 0;
}
static void
amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
struct drm_display_mode *t, *mode;
/* If the EDID preferred mode doesn't match the native mode, use it */
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
if (mode->hdisplay != native_mode->hdisplay ||
mode->vdisplay != native_mode->vdisplay)
drm_mode_copy(native_mode, mode);
}
}
/* Try to get native mode details from EDID if necessary */
if (!native_mode->clock) {
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
drm_mode_copy(native_mode, mode);
drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
break;
}
}
}
if (!native_mode->clock) {
DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
amdgpu_encoder->rmx_type = RMX_OFF;
}
}
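/* Panel mode probing: prefer EDID modes (plus scaled common modes),
 * otherwise fall back to the native mode from the vbios tables.
 */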
static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
{
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
if (ret > 0) {
encoder = amdgpu_connector_best_single_encoder(connector);
if (encoder) {
amdgpu_connector_fixup_lcd_native_mode(encoder, connector);
/* add scaled modes */
amdgpu_connector_add_common_modes(encoder, connector);
}
return ret;
}
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
return 0;
/* we have no EDID modes */
mode = amdgpu_connector_lcd_native_mode(encoder);
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
/* add the width/height from vbios tables if available */
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
/* add scaled modes */
amdgpu_connector_add_common_modes(encoder, connector);
}
return ret;
}
static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
return MODE_PANEL;
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
/* AVIVO hardware supports downscaling modes larger than the panel
* to the panel size, but I'm not sure this is desirable.
*/
if ((mode->hdisplay > native_mode->hdisplay) ||
(mode->vdisplay > native_mode->vdisplay))
return MODE_PANEL;
/* if scaling is disabled, block non-native modes */
if (amdgpu_encoder->rmx_type == RMX_OFF) {
if ((mode->hdisplay != native_mode->hdisplay) ||
(mode->vdisplay != native_mode->vdisplay))
return MODE_PANEL;
}
}
return MODE_OK;
}
static enum drm_connector_status
amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
enum drm_connector_status ret = connector_status_disconnected;
int r;
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0) {
pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
}
}
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
/* check if panel is valid */
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
ret = connector_status_connected;
}
/* check for edid as well */
amdgpu_connector_get_edid(connector);
if (amdgpu_connector->edid)
ret = connector_status_connected;
/* check acpi lid status ??? */
amdgpu_connector_update_scratch_regs(connector, ret);
if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret;
}
static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
}
static void amdgpu_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
amdgpu_connector_free_edid(connector);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
struct drm_device *dev = connector->dev;
struct amdgpu_encoder *amdgpu_encoder;
enum amdgpu_rmx_type rmx_type;
DRM_DEBUG_KMS("\n");
if (property != dev->mode_config.scaling_mode_property)
return 0;
if (connector->encoder)
amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
else {
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
}
switch (value) {
case DRM_MODE_SCALE_NONE:
rmx_type = RMX_OFF;
break;
case DRM_MODE_SCALE_CENTER:
rmx_type = RMX_CENTER;
break;
case DRM_MODE_SCALE_ASPECT:
rmx_type = RMX_ASPECT;
break;
default:
case DRM_MODE_SCALE_FULLSCREEN:
rmx_type = RMX_FULL;
break;
}
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
amdgpu_encoder->rmx_type = rmx_type;
amdgpu_connector_property_change_mode(&amdgpu_encoder->base);
return 0;
}
static const struct drm_connector_helper_funcs amdgpu_connector_lvds_helper_funcs = {
.get_modes = amdgpu_connector_lvds_get_modes,
.mode_valid = amdgpu_connector_lvds_mode_valid,
.best_encoder = amdgpu_connector_best_single_encoder,
};
static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_lcd_property,
};
static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
{
int ret;
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
amdgpu_get_native_mode(connector);
return ret;
}
static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
/* XXX check mode bandwidth */
if ((mode->clock / 10) > adev->clock.max_pixel_clock)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static enum drm_connector_status
amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
const struct drm_encoder_helper_funcs *encoder_funcs;
bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
int r;
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0) {
pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
}
}
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
if (amdgpu_connector->ddc_bus)
dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
} else {
amdgpu_connector->use_digital =
!!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) {
amdgpu_connector_free_edid(connector);
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
}
}
} else {
/* if we aren't forcing don't do destructive polling */
if (!force) {
/* only return the previous status if we last
* detected a monitor via load.
*/
if (amdgpu_connector->detected_by_load)
ret = connector->status;
goto out;
}
if (amdgpu_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
if (ret != connector_status_disconnected)
amdgpu_connector->detected_by_load = true;
}
}
amdgpu_connector_update_scratch_regs(connector, ret);
out:
if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret;
}
static const struct drm_connector_helper_funcs amdgpu_connector_vga_helper_funcs = {
.get_modes = amdgpu_connector_vga_get_modes,
.mode_valid = amdgpu_connector_vga_mode_valid,
.best_encoder = amdgpu_connector_best_single_encoder,
};
static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_property,
};
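/* Return true if the current HPD sense still matches the cached
 * connector status, so a full re-detect can be skipped.
 */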
static bool
amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status status;
if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE) {
if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd))
status = connector_status_connected;
else
status = connector_status_disconnected;
if (connector->status == status)
return true;
}
return false;
}
/*
* DVI is complicated
* Do a DDC probe, if DDC probe passes, get the full EDID so
* we can do analog/digital monitor detection at this point.
* If the monitor is an analog monitor or we got no DDC,
* we need to find the DAC encoder object for this connector.
* If we got no DDC, we do load detection on the DAC encoder object.
* If we got analog DDC or load detection passes on the DAC encoder
* we have to check if this analog encoder is shared with anyone else (TV)
* if its shared we have to set the other connector to disconnected.
*/
static enum drm_connector_status
amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
const struct drm_encoder_helper_funcs *encoder_funcs;
int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0) {
pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
}
}
if (amdgpu_connector->detected_hpd_without_ddc) {
force = true;
amdgpu_connector->detected_hpd_without_ddc = false;
}
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
if (amdgpu_connector->ddc_bus) {
dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
/* Sometimes the pins required for the DDC probe on DVI
* connectors don't make contact at the same time that the ones
* for HPD do. If the DDC probe fails even though we had an HPD
* signal, try again later
*/
if (!dret && !force &&
amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
amdgpu_connector->detected_hpd_without_ddc = true;
schedule_delayed_work(&adev->hotplug_work,
msecs_to_jiffies(1000));
goto exit;
}
}
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
broken_edid = true; /* defer use_digital to later */
} else {
amdgpu_connector->use_digital =
!!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) {
amdgpu_connector_free_edid(connector);
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
}
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
* you don't really know what's connected to which port as both are digital.
*/
if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_connector *list_connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *list_amdgpu_connector;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(list_connector,
&iter) {
if (connector == list_connector)
continue;
list_amdgpu_connector = to_amdgpu_connector(list_connector);
if (list_amdgpu_connector->shared_ddc &&
(list_amdgpu_connector->ddc_bus->rec.i2c_id ==
amdgpu_connector->ddc_bus->rec.i2c_id)) {
/* cases where both connectors are digital */
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
amdgpu_connector_free_edid(connector);
ret = connector_status_disconnected;
}
}
}
}
drm_connector_list_iter_end(&iter);
}
}
}
if ((ret == connector_status_connected) && (amdgpu_connector->use_digital == true))
goto out;
/* DVI-D and HDMI-A are digital only */
if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
(connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
goto out;
/* if we aren't forcing don't do destructive polling */
if (!force) {
/* only return the previous status if we last
* detected a monitor via load.
*/
if (amdgpu_connector->detected_by_load)
ret = connector->status;
goto out;
}
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
struct drm_encoder *encoder;
drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect) {
if (!broken_edid) {
if (ret != connector_status_connected) {
/* deal with analog monitors without DDC */
ret = encoder_funcs->detect(encoder, connector);
if (ret == connector_status_connected) {
amdgpu_connector->use_digital = false;
}
if (ret != connector_status_disconnected)
amdgpu_connector->detected_by_load = true;
}
} else {
enum drm_connector_status lret;
/* assume digital unless load detected otherwise */
amdgpu_connector->use_digital = true;
lret = encoder_funcs->detect(encoder, connector);
DRM_DEBUG_KMS("load_detect %x returned: %x\n",
encoder->encoder_type, lret);
if (lret == connector_status_connected)
amdgpu_connector->use_digital = false;
}
break;
}
}
}
out:
/* updated in get modes as well since we need to know if it's analog or digital */
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
return ret;
}
/* okay need to be smart in here about which encoder to pick */
static struct drm_encoder *
amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
drm_connector_for_each_possible_encoder(connector, encoder) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
} else {
if (encoder->encoder_type == DRM_MODE_ENCODER_DAC ||
encoder->encoder_type == DRM_MODE_ENCODER_TVDAC)
return encoder;
}
}
/* see if we have a default encoder TODO */
/* then check use digital */
/* pick the first one */
drm_connector_for_each_possible_encoder(connector, encoder)
return encoder;
return NULL;
}
static void amdgpu_connector_dvi_force(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (connector->force == DRM_FORCE_ON)
amdgpu_connector->use_digital = false;
if (connector->force == DRM_FORCE_ON_DIGITAL)
amdgpu_connector->use_digital = true;
}
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* XXX check mode bandwidth */
if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
(amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
return MODE_OK;
} else if (connector->display_info.is_hdmi) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
else
return MODE_OK;
} else {
return MODE_CLOCK_HIGH;
}
}
/* check against the max pixel clock */
if ((mode->clock / 10) > adev->clock.max_pixel_clock)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static const struct drm_connector_helper_funcs amdgpu_connector_dvi_helper_funcs = {
.get_modes = amdgpu_connector_vga_get_modes,
.mode_valid = amdgpu_connector_dvi_mode_valid,
.best_encoder = amdgpu_connector_dvi_encoder,
};
static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
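/* Mode probing for DP/eDP/LVDS connectors: power up eDP panels or set
 * up bridge DDC as needed before reading the EDID, and fall back to
 * the native panel mode for panels without EDID modes.
 */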
static int amdgpu_connector_dp_get_modes(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int ret;
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
if (!amdgpu_dig_connector->edp_on)
amdgpu_atombios_encoder_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
if (!amdgpu_dig_connector->edp_on)
amdgpu_atombios_encoder_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
/* need to setup ddc on the bridge */
if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
if (encoder)
amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
}
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
}
if (ret > 0) {
if (encoder) {
amdgpu_connector_fixup_lcd_native_mode(encoder, connector);
/* add scaled modes */
amdgpu_connector_add_common_modes(encoder, connector);
}
return ret;
}
if (!encoder)
return 0;
/* we have no EDID modes */
mode = amdgpu_connector_lcd_native_mode(encoder);
if (mode) {
ret = 1;
drm_mode_probed_add(connector, mode);
/* add the width/height from vbios tables if available */
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
/* add scaled modes */
amdgpu_connector_add_common_modes(encoder, connector);
}
} else {
/* need to setup ddc on the bridge */
if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
if (encoder)
amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
}
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
amdgpu_get_native_mode(connector);
}
return ret;
}
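/* Return the encoder id of a TRAVIS/NUTMEG DP bridge attached to this
 * connector, or ENCODER_OBJECT_ID_NONE if there is none.
 */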
u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
return amdgpu_encoder->encoder_id;
default:
break;
}
}
return ENCODER_OBJECT_ID_NONE;
}
static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
bool found = false;
drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
}
return found;
}
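/* DP 1.2 (HBR2) needs both a high enough default display clock and an
 * encoder that advertises HBR2 support.
 */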
bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
if ((adev->clock.default_dispclk >= 53900) &&
amdgpu_connector_encoder_is_hbr2(connector)) {
return true;
}
return false;
}
static enum drm_connector_status
amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int r;
if (!drm_kms_helper_is_poll_worker()) {
r = pm_runtime_get_sync(connector->dev->dev);
if (r < 0) {
pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
}
}
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto out;
}
amdgpu_connector_free_edid(connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
/* check if panel is valid */
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
ret = connector_status_connected;
}
/* eDP is always DP */
amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
if (!amdgpu_dig_connector->edp_on)
amdgpu_atombios_encoder_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
ret = connector_status_connected;
if (!amdgpu_dig_connector->edp_on)
amdgpu_atombios_encoder_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
/* DP bridges are always DP */
amdgpu_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
/* get the DPCD from the bridge */
amdgpu_atombios_dp_get_dpcd(amdgpu_connector);
if (encoder) {
/* setup ddc on the bridge */
amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
/* bridge chips are always aux */
/* try DDC */
if (amdgpu_display_ddc_probe(amdgpu_connector, true))
ret = connector_status_connected;
else if (amdgpu_connector->dac_load_detect) { /* try load detection */
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
} else {
amdgpu_dig_connector->dp_sink_type =
amdgpu_atombios_dp_get_sinktype(amdgpu_connector);
if (amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
ret = connector_status_connected;
if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
amdgpu_atombios_dp_get_dpcd(amdgpu_connector);
} else {
if (amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
ret = connector_status_connected;
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (amdgpu_display_ddc_probe(amdgpu_connector,
false))
ret = connector_status_connected;
}
}
}
amdgpu_connector_update_scratch_regs(connector, ret);
out:
if (!drm_kms_helper_is_poll_worker()) {
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
drm_dp_set_subconnector_property(&amdgpu_connector->base,
ret,
amdgpu_dig_connector->dpcd,
amdgpu_dig_connector->downstream_ports);
return ret;
}
static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
/* XXX check mode bandwidth */
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
return MODE_PANEL;
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
/* AVIVO hardware supports downscaling modes larger than the panel
* to the panel size, but I'm not sure this is desirable.
*/
if ((mode->hdisplay > native_mode->hdisplay) ||
(mode->vdisplay > native_mode->vdisplay))
return MODE_PANEL;
/* if scaling is disabled, block non-native modes */
if (amdgpu_encoder->rmx_type == RMX_OFF) {
if ((mode->hdisplay != native_mode->hdisplay) ||
(mode->vdisplay != native_mode->vdisplay))
return MODE_PANEL;
}
}
return MODE_OK;
} else {
if ((amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return amdgpu_atombios_dp_mode_valid_helper(connector, mode);
} else {
if (connector->display_info.is_hdmi) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
} else {
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
}
}
}
return MODE_OK;
}
static int
amdgpu_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
int r = 0;
if (amdgpu_connector->ddc_bus->has_aux) {
amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
}
return r;
}
static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
.get_modes = amdgpu_connector_dp_get_modes,
.mode_valid = amdgpu_connector_dp_mode_valid,
.best_encoder = amdgpu_connector_dvi_encoder,
};
static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
.late_register = amdgpu_connector_late_register,
};
static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_lcd_property,
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
.late_register = amdgpu_connector_late_register,
};
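/* Create and register a drm_connector for a connector object parsed
 * from the vbios tables, wiring up the ddc/aux bus, helper funcs and
 * properties appropriate for the connector type; if the connector id
 * is already registered, only merge in the newly supported devices.
 */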
void
amdgpu_connector_add(struct amdgpu_device *adev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
struct amdgpu_i2c_bus_rec *i2c_bus,
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
bool has_aux = false;
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
/* see if we already added it */
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->connector_id == connector_id) {
amdgpu_connector->devices |= supported_device;
drm_connector_list_iter_end(&iter);
return;
}
if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
if (amdgpu_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
amdgpu_connector->shared_ddc = true;
shared_ddc = true;
}
if (amdgpu_connector->router_bus && router->ddc_valid &&
(amdgpu_connector->router.router_id == router->router_id)) {
amdgpu_connector->shared_ddc = false;
shared_ddc = false;
}
}
}
drm_connector_list_iter_end(&iter);
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->devices & supported_device) {
switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
is_dp_bridge = true;
break;
default:
break;
}
}
}
amdgpu_connector = kzalloc(sizeof(struct amdgpu_connector), GFP_KERNEL);
if (!amdgpu_connector)
return;
connector = &amdgpu_connector->base;
amdgpu_connector->connector_id = connector_id;
amdgpu_connector->devices = supported_device;
amdgpu_connector->shared_ddc = shared_ddc;
amdgpu_connector->connector_object_id = connector_object_id;
amdgpu_connector->hpd = *hpd;
amdgpu_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
amdgpu_connector->router_bus = amdgpu_i2c_lookup(adev, &router->i2c_info);
if (!amdgpu_connector->router_bus)
DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
}
if (is_dp_bridge) {
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (amdgpu_connector->ddc_bus) {
has_aux = true;
ddc = &amdgpu_connector->ddc_bus->adapter;
} else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_dp_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
1);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_dp_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_hborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
1);
}
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_edp_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
}
} else {
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_vga_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
1);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_vga_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
1);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_dvi_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
1);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_hborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
1);
}
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_DVII)
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_dvi_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
1);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_hborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
connector->doublescan_allowed = true;
else
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (amdgpu_connector->ddc_bus) {
has_aux = true;
ddc = &amdgpu_connector->ddc_bus->adapter;
} else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_dp_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
1);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_property,
UNDERSCAN_OFF);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_hborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_eDP:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (amdgpu_connector->ddc_bus) {
has_aux = true;
ddc = &amdgpu_connector->ddc_bus->adapter;
} else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_edp_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL);
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
else
ddc = &amdgpu_connector->ddc_bus->adapter;
}
drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
&amdgpu_connector_lvds_funcs,
connector_type,
ddc);
drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
break;
}
}
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
if (i2c_bus->valid) {
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
}
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP) {
drm_connector_attach_dp_subconnector_property(&amdgpu_connector->base);
}
return;
failed:
drm_connector_cleanup(connector);
kfree(connector);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c |
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"
#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "navi10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
#include "smuio_v13_0.h"
#include "amdgpu_vkms.h"
#include "mxgpu_ai.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
static const struct amd_ip_funcs soc15_common_ip_funcs;
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_encode =
{
.codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
.codec_array = vega_video_codecs_encode_array,
};
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
{
.codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
.codec_array = vega_video_codecs_decode_array,
};
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs rv_video_codecs_decode =
{
.codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
.codec_array = rv_video_codecs_decode_array,
};
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs rn_video_codecs_decode =
{
.codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
.codec_array = rn_video_codecs_decode_array,
};
static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = {
.codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array),
.codec_array = vcn_4_0_3_video_codecs_decode_array,
};
static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = {
.codec_count = 0,
.codec_array = NULL,
};
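/*
 * Select the encode or decode codec capability table for the queried
 * engine: ASICs with a VCE block choose by VCE IP version, everything
 * else chooses by the UVD/VCN IP version.
 */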
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
if (adev->ip_versions[VCE_HWIP][0]) {
switch (adev->ip_versions[VCE_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 1, 0):
if (encode)
*codecs = &vega_video_codecs_encode;
else
*codecs = &vega_video_codecs_decode;
return 0;
default:
return -EINVAL;
}
} else {
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
if (encode)
*codecs = &vega_video_codecs_encode;
else
*codecs = &rv_video_codecs_decode;
return 0;
case IP_VERSION(2, 5, 0):
case IP_VERSION(2, 6, 0):
case IP_VERSION(2, 2, 0):
if (encode)
*codecs = &vega_video_codecs_encode;
else
*codecs = &rn_video_codecs_decode;
return 0;
case IP_VERSION(4, 0, 3):
if (encode)
*codecs = &vcn_4_0_3_video_codecs_encode;
else
*codecs = &vcn_4_0_3_video_codecs_decode;
return 0;
default:
return -EINVAL;
}
}
}
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
WREG32(address, ((reg) & 0x1ff));
r = RREG32(data);
spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
return r;
}
static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);
spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
WREG32(address, ((reg) & 0x1ff));
WREG32(data, (v));
spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
spin_lock_irqsave(&adev->didt_idx_lock, flags);
WREG32(address, (reg));
r = RREG32(data);
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
return r;
}
static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
spin_lock_irqsave(&adev->didt_idx_lock, flags);
WREG32(address, (reg));
WREG32(data, (v));
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
return r;
}
static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
u32 r;
spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
return r;
}
static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags;
spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
return adev->nbio.funcs->get_memsize(adev);
}
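/*
 * Reference clock (xclk): MP1 v12.0.x parts report a fixed value of
 * 10000, MP1 v10.0.x parts return a quarter of the SPLL reference
 * frequency, and all other parts return the SPLL reference unchanged.
 */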
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
return 10000;
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
return reference_clock / 4;
return reference_clock;
}
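/*
 * Program GRBM_GFX_CNTL so that subsequent GRBM accesses target the
 * given ME/pipe/queue/VMID on the selected XCC instance.
 */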
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
{
u32 grbm_gfx_cntl = 0;
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
/* todo */
return false;
}
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};
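/*
 * Read a GRBM-indexed register: steer GRBM to the requested SE/SH first
 * (0xffffffff selects broadcast), read the register, then restore
 * broadcast mode.  Serialized by grbm_idx_mutex.
 */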
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset)
{
uint32_t val;
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
bool indexed, u32 se_num,
u32 sh_num, u32 reg_offset)
{
if (indexed) {
return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
} else {
if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
return adev->gfx.config.gb_addr_config;
else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
return adev->gfx.config.db_debug2;
return RREG32(reg_offset);
}
}
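/*
 * Register reads through the ASIC read_register callback are restricted
 * to the allow-list above; any other offset returns -EINVAL.
 */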
static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
uint32_t i;
struct soc15_allowed_register_entry *en;
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
en = &soc15_allowed_read_registers[i];
if (!adev->reg_offset[en->hwip][en->inst])
continue;
else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ en->reg_offset))
continue;
*value = soc15_get_register_value(adev,
soc15_allowed_read_registers[i].grbm_indexed,
se_num, sh_num, reg_offset);
return 0;
}
return -EINVAL;
}
/**
* soc15_program_register_sequence - program an array of registers.
*
* @adev: amdgpu_device pointer
* @regs: pointer to the register array
* @array_size: size of the register array
*
 * Programs an array of registers with AND and OR masks.
* This is a helper for setting golden registers.
*/
void soc15_program_register_sequence(struct amdgpu_device *adev,
const struct soc15_reg_golden *regs,
const u32 array_size)
{
const struct soc15_reg_golden *entry;
u32 tmp, reg;
int i;
for (i = 0; i < array_size; ++i) {
entry = &regs[i];
reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
if (entry->and_mask == 0xffffffff) {
tmp = entry->or_mask;
} else {
tmp = (entry->hwip == GC_HWIP) ?
RREG32_SOC15_IP(GC, reg) : RREG32(reg);
tmp &= ~(entry->and_mask);
tmp |= (entry->or_mask & entry->and_mask);
}
if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
WREG32_RLC(reg, tmp);
else
(entry->hwip == GC_HWIP) ?
WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
}
}
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int ret = 0;
/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
if (ras && adev->ras_enabled)
adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
ret = amdgpu_dpm_baco_reset(adev);
if (ret)
return ret;
/* re-enable doorbell interrupt after BACO exit */
if (ras && adev->ras_enabled)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
return 0;
}
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
bool baco_reset = false;
bool connected_to_cpu = false;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
connected_to_cpu = true;
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
/* If connected to cpu, driver only support mode2 */
if (connected_to_cpu)
return AMD_RESET_METHOD_MODE2;
return amdgpu_reset_method;
}
if (amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
return AMD_RESET_METHOD_MODE2;
case IP_VERSION(9, 0, 0):
case IP_VERSION(11, 0, 2):
if (adev->asic_type == CHIP_VEGA20) {
if (adev->psp.sos.fw_version >= 0x80067)
baco_reset = amdgpu_dpm_is_baco_supported(adev);
/*
* 1. PMFW version > 0x284300: all cases use baco
* 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
*/
if (ras && adev->ras_enabled &&
adev->pm.fw_version <= 0x283400)
baco_reset = false;
} else {
baco_reset = amdgpu_dpm_is_baco_supported(adev);
}
break;
case IP_VERSION(13, 0, 2):
/*
 * 1. connected to cpu: driver issues mode2 reset
 * 2. discrete gpu: driver issues mode1 reset
*/
if (connected_to_cpu)
return AMD_RESET_METHOD_MODE2;
break;
case IP_VERSION(13, 0, 6):
/* Use gpu_recovery param to target a reset method.
* Enable triggering of GPU reset only if specified
* by module parameter.
*/
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
else if (!(adev->flags & AMD_IS_APU))
return AMD_RESET_METHOD_MODE1;
else
return AMD_RESET_METHOD_MODE2;
default:
break;
}
if (baco_reset)
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
}
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
(adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_PCI:
dev_info(adev->dev, "PCI reset\n");
return amdgpu_device_pci_reset(adev);
case AMD_RESET_METHOD_BACO:
dev_info(adev->dev, "BACO reset\n");
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
return amdgpu_dpm_mode2_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
return amdgpu_device_mode1_reset(adev);
}
}
static bool soc15_supports_baco(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(11, 0, 2):
if (adev->asic_type == CHIP_VEGA20) {
if (adev->psp.sos.fw_version >= 0x80067)
return amdgpu_dpm_is_baco_supported(adev);
return false;
} else {
return amdgpu_dpm_is_baco_supported(adev);
}
break;
default:
return false;
}
}
/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
u32 cntl_reg, u32 status_reg)
{
return 0;
}*/
static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
/*int r;
r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
if (r)
return r;
r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
*/
return 0;
}
static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
/* todo */
return 0;
}
static void soc15_program_aspm(struct amdgpu_device *adev)
{
if (!amdgpu_device_should_use_aspm(adev))
return;
if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->program_aspm))
adev->nbio.funcs->program_aspm(adev);
}
const struct amdgpu_ip_block_version vega10_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 2,
.minor = 0,
.rev = 0,
.funcs = &soc15_common_ip_funcs,
};
static void soc15_reg_base_init(struct amdgpu_device *adev)
{
/* Set IP register base before any HW register access */
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_RAVEN:
case CHIP_RENOIR:
vega10_reg_base_init(adev);
break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
break;
case CHIP_ARCTURUS:
arct_reg_base_init(adev);
break;
case CHIP_ALDEBARAN:
aldebaran_reg_base_init(adev);
break;
default:
DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
break;
}
}
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_ai_virt_ops;
/* init soc15 reg base early enough so we can
 * request full access for sriov before
* set_ip_blocks. */
soc15_reg_base_init(adev);
}
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we implement soft reset */
return true;
}
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
uint32_t perfctr = 0;
uint64_t cnt0_of, cnt1_of;
int tmp;
/* This reports 0 on APUs, so return to avoid writing/reading registers
* that may or may not be different from their GPU counterparts
*/
if (adev->flags & AMD_IS_APU)
return;
/* Set the 2 events that we wish to watch, defined above */
/* Reg 40 is # received msgs */
/* Reg 104 is # of posted requests sent */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
/* Zero out and enable the perf counters
* Write 0x5:
* Bit 0 = Start all counters(1)
* Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
msleep(1000);
/* Load the shadow and disable the perf counters
* Write 0x2:
* Bit 0 = Stop counters(0)
* Bit 1 = Load the shadow counters(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
/* Read register values to get any >32bit overflow */
tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
/* Get the values and add the overflow */
*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
uint32_t perfctr = 0;
uint64_t cnt0_of, cnt1_of;
int tmp;
/* This reports 0 on APUs, so return to avoid writing/reading registers
* that may or may not be different from their GPU counterparts
*/
if (adev->flags & AMD_IS_APU)
return;
/* Set the 2 events that we wish to watch, defined above */
/* Reg 40 is # received msgs */
/* Reg 108 is # of posted requests sent on VG20 */
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
EVENT0_SEL, 40);
perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
EVENT1_SEL, 108);
/* Write to enable desired perf counters */
WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
/* Zero out and enable the perf counters
* Write 0x5:
* Bit 0 = Start all counters(1)
* Bit 2 = Global counter reset enable(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);
msleep(1000);
/* Load the shadow and disable the perf counters
* Write 0x2:
* Bit 0 = Stop counters(0)
* Bit 1 = Load the shadow counters(1)
*/
WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);
/* Read register values to get any >32bit overflow */
tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);
/* Get the values and add the overflow */
*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
/* CP hangs in IGT reloading test on RN, reset as a workaround (WA) */
if (adev->asic_type == CHIP_RENOIR)
return true;
/* Just return false for soc15 GPUs. Reset does not seem to
* be necessary.
*/
if (!amdgpu_passthrough(adev))
return false;
if (adev->flags & AMD_IS_APU)
return false;
/* Check sOS sign of life register to confirm sys driver and sOS
 * have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
if (sol_reg)
return true;
return false;
}
static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
uint64_t nak_r, nak_g;
/* Get the number of NAKs received and generated */
nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
/* Add the total number of NAKs, i.e. the number of replays */
return (nak_r + nak_g);
}
static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
gmc_v9_0_restore_registers(adev);
}
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
.reset_method = &soc15_asic_reset_method,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega10_doorbell_index_init,
.get_pcie_usage = &soc15_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.pre_asic_init = &soc15_pre_asic_init,
.query_video_codecs = &soc15_query_video_codecs,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
.reset_method = &soc15_asic_reset_method,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.pre_asic_init = &soc15_pre_asic_init,
.query_video_codecs = &soc15_query_video_codecs,
};
static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
.read_register = &soc15_read_register,
.reset = &soc15_asic_reset,
.reset_method = &soc15_asic_reset_method,
.get_xclk = &soc15_get_xclk,
.set_uvd_clocks = &soc15_set_uvd_clocks,
.set_vce_clocks = &soc15_set_vce_clocks,
.get_config_memsize = &soc15_get_config_memsize,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &aqua_vanjaram_doorbell_index_init,
.get_pcie_usage = &amdgpu_nbio_get_pcie_usage,
.need_reset_on_init = &soc15_need_reset_on_init,
.get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.pre_asic_init = &soc15_pre_asic_init,
.query_video_codecs = &soc15_query_video_codecs,
.encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing,
};
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!amdgpu_sriov_vf(adev)) {
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &amdgpu_device_indirect_rreg;
adev->pcie_wreg = &amdgpu_device_indirect_wreg;
adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext;
adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext;
adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
adev->didt_wreg = &soc15_didt_wreg;
adev->gc_cac_rreg = &soc15_gc_cac_rreg;
adev->gc_cac_wreg = &soc15_gc_cac_wreg;
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
adev->rev_id = amdgpu_device_get_rev_id(adev);
adev->external_rev_id = 0xFF;
/* TODO: split the CG and PG flags based on the IP version for which
 * they are relevant.
*/
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_DRM_MGCG |
AMD_CG_SUPPORT_DRM_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_DF_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = 0;
adev->external_rev_id = 0x1;
break;
case IP_VERSION(9, 2, 1):
adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case IP_VERSION(9, 4, 0):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x28;
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
adev->apu_flags |= AMD_APU_IS_RAVEN2;
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->external_rev_id = adev->rev_id + 0x79;
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
adev->external_rev_id = adev->rev_id + 0x41;
else if (adev->rev_id == 1)
adev->external_rev_id = adev->rev_id + 0x20;
else
adev->external_rev_id = adev->rev_id + 0x01;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
/*
* MMHUB PG needs to be disabled for Picasso for
* stability reasons.
*/
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_DRM_MGCG |
AMD_CG_SUPPORT_DRM_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
break;
case IP_VERSION(9, 4, 1):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x32;
break;
case IP_VERSION(9, 3, 0):
adev->asic_funcs = &soc15_asic_funcs;
if (adev->apu_flags & AMD_APU_IS_RENOIR)
adev->external_rev_id = adev->rev_id + 0x91;
else
adev->external_rev_id = adev->rev_id + 0xa1;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_DF_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
break;
case IP_VERSION(9, 4, 2):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case IP_VERSION(9, 4, 3):
adev->asic_funcs = &aqua_vanjaram_asic_funcs;
adev->cg_flags =
AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG |
AMD_CG_SUPPORT_IH_CG;
adev->pg_flags =
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x46;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_setting(adev);
xgpu_ai_mailbox_set_irq_funcs(adev);
}
return 0;
}
static int soc15_common_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
/* Enable selfring doorbell aperture late because doorbell BAR
 * aperture will change if the BAR is resized successfully in gmc sw_init.
*/
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
return 0;
}
static int soc15_common_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_add_irq_id(adev);
if (adev->df.funcs &&
adev->df.funcs->sw_init)
adev->df.funcs->sw_init(adev);
return 0;
}
static int soc15_common_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->df.funcs &&
adev->df.funcs->sw_fini)
adev->df.funcs->sw_fini(adev);
return 0;
}
static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev)
{
int i;
/* sdma doorbell range is programmed by the hypervisor */
if (!amdgpu_sriov_vf(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->nbio.funcs->sdma_doorbell_range(adev, i,
true, adev->doorbell_index.sdma_engine[i] << 1,
adev->doorbell_index.sdma_doorbell_range);
}
}
}
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
adev->nbio.funcs->init_registers(adev);
/* remap HDP registers to a hole in mmio space,
 * for the purpose of exposing those registers
* to process space
*/
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
adev->nbio.funcs->enable_doorbell_aperture(adev, true);
/* HW doorbell routing policy: doorbell writes not
 * in the SDMA/IH/MM/ACV ranges will be routed to CP. So
* we need to init SDMA doorbell range prior
* to CP ip block init and ring test. IH already
* happens before CP.
*/
soc15_sdma_doorbell_range_init(adev);
return 0;
}
static int soc15_common_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Disable the doorbell aperture and selfring doorbell aperture
* separately in hw_fini because soc15_enable_doorbell_aperture
* has been removed and there is no need to delay disabling
* selfring doorbell.
*/
adev->nbio.funcs->enable_doorbell_aperture(adev, false);
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_put_irq(adev);
if (adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
if (adev->nbio.ras &&
adev->nbio.ras->init_ras_controller_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
if (adev->nbio.ras &&
adev->nbio.ras->init_ras_err_event_athub_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
}
return 0;
}
static int soc15_common_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return soc15_common_hw_fini(adev);
}
static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return soc15_common_hw_init(adev);
}
static bool soc15_common_is_idle(void *handle)
{
return true;
}
static int soc15_common_wait_for_idle(void *handle)
{
return 0;
}
static int soc15_common_soft_reset(void *handle)
{
return 0;
}
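/*
 * DRM (MP0 misc) clock gating is controlled by bits 24-31 of
 * MP0_MISC_CGTT_CTRL0: clearing them enables medium-grain clock gating,
 * setting them disables it.
 */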
static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
data &= ~(0x01000000 |
0x02000000 |
0x04000000 |
0x08000000 |
0x10000000 |
0x20000000 |
0x40000000 |
0x80000000);
else
data |= (0x01000000 |
0x02000000 |
0x04000000 |
0x08000000 |
0x10000000 |
0x20000000 |
0x40000000 |
0x80000000);
if (def != data)
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}
static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
uint32_t def, data;
def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
data |= 1;
else
data &= ~1;
if (def != data)
WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}
static int soc15_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 2, 0):
case IP_VERSION(7, 4, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
state == AMD_CG_STATE_GATE);
adev->smuio.funcs->update_rom_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->df.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
case IP_VERSION(2, 5, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_clock_gating(adev,
state == AMD_CG_STATE_GATE);
soc15_update_drm_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
case IP_VERSION(7, 4, 1):
case IP_VERSION(7, 4, 4):
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
adev->nbio.funcs->get_clockgating_state(adev, flags);
adev->hdp.funcs->get_clock_gating_state(adev, flags);
if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
if (!(data & 0x01000000))
*flags |= AMD_CG_SUPPORT_DRM_MGCG;
/* AMD_CG_SUPPORT_DRM_LS */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
if (data & 0x1)
*flags |= AMD_CG_SUPPORT_DRM_LS;
}
/* AMD_CG_SUPPORT_ROM_MGCG */
adev->smuio.funcs->get_clock_gating_state(adev, flags);
adev->df.funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
/* todo */
return 0;
}
static const struct amd_ip_funcs soc15_common_ip_funcs = {
.name = "soc15_common",
.early_init = soc15_common_early_init,
.late_init = soc15_common_late_init,
.sw_init = soc15_common_sw_init,
.sw_fini = soc15_common_sw_fini,
.hw_init = soc15_common_hw_init,
.hw_fini = soc15_common_hw_fini,
.suspend = soc15_common_suspend,
.resume = soc15_common_resume,
.is_idle = soc15_common_is_idle,
.wait_for_idle = soc15_common_wait_for_idle,
.soft_reset = soc15_common_soft_reset,
.set_clockgating_state = soc15_common_set_clockgating_state,
.set_powergating_state = soc15_common_set_powergating_state,
.get_clockgating_state = soc15_common_get_clockgating_state,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/soc15.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "athub_v3_0.h"
#include "athub/athub_3_0_0_offset.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "navi10_enum.h"
#include "soc15_common.h"
#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
{
uint32_t data;
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 1):
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
break;
default:
data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
break;
}
return data;
}
static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
{
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 1):
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
break;
default:
WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
break;
}
}
static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
athub_v3_0_set_cg_cntl(adev, data);
}
static void
athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
def = data = athub_v3_0_get_cg_cntl(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
athub_v3_0_set_cg_cntl(adev, data);
}
int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 0, 2):
athub_v3_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_v3_0_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
data = athub_v3_0_get_cg_cntl(adev);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
/* AMD_CG_SUPPORT_ATHUB_LS */
if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_LS;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/athub_v3_0.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
#include "tonga_sdma_pkt_open.h"
#include "ivsrcid/ivsrcid_vislands30.h"
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
SDMA0_REGISTER_OFFSET,
SDMA1_REGISTER_OFFSET
};
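/*
 * Golden register tables: each entry is a (register, and_mask, or_mask)
 * triplet consumed by amdgpu_device_program_register_sequence().  Bits
 * covered by and_mask are cleared and replaced with or_mask; an and_mask
 * of 0xffffffff writes or_mask verbatim.
 */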
static const u32 golden_settings_tonga_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 tonga_mgcg_cgcg_init[] =
{
mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 golden_settings_fiji_a10[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 fiji_mgcg_cgcg_init[] =
{
mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 golden_settings_polaris11_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 golden_settings_polaris10_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};
static const u32 cz_golden_settings_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};
static const u32 cz_mgcg_cgcg_init[] =
{
mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 stoney_golden_settings_a11[] =
{
mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};
static const u32 stoney_mgcg_cgcg_init[] =
{
mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
* DMA engines. These engines are used for compute
* and gfx. There are two DMA engines (SDMA0, SDMA1)
* and each one supports 1 ring buffer used for gfx
* and 2 queues used for compute.
*
* The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
* packet format that is different from the PM4 format
* used by the CP. sDMA supports copying data, writing
* embedded data, solid fills, and a number of other
* things. It also has support for tiling/detiling of
* buffers.
*/
static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
amdgpu_device_program_register_sequence(adev,
fiji_mgcg_cgcg_init,
ARRAY_SIZE(fiji_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_fiji_a10,
ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
amdgpu_device_program_register_sequence(adev,
tonga_mgcg_cgcg_init,
ARRAY_SIZE(tonga_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
golden_settings_tonga_a11,
ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris11_a11,
ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
amdgpu_device_program_register_sequence(adev,
golden_settings_polaris10_a11,
ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
amdgpu_device_program_register_sequence(adev,
cz_mgcg_cgcg_init,
ARRAY_SIZE(cz_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
cz_golden_settings_a11,
ARRAY_SIZE(cz_golden_settings_a11));
break;
case CHIP_STONEY:
amdgpu_device_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
ARRAY_SIZE(stoney_mgcg_cgcg_init));
amdgpu_device_program_register_sequence(adev,
stoney_golden_settings_a11,
ARRAY_SIZE(stoney_golden_settings_a11));
break;
default:
break;
}
}
static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
* sdma_v3_0_init_microcode - load ucode images from disk
*
* @adev: amdgpu_device pointer
*
* Use the firmware interface to load the ucode images into
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
int err = 0, i;
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_TONGA:
chip_name = "tonga";
break;
case CHIP_FIJI:
chip_name = "fiji";
break;
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
case CHIP_POLARIS11:
chip_name = "polaris11";
break;
case CHIP_POLARIS12:
chip_name = "polaris12";
break;
case CHIP_VEGAM:
chip_name = "vegam";
break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
case CHIP_STONEY:
chip_name = "stoney";
break;
default: BUG();
}
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
if (adev->sdma.instance[i].feature_version >= 20)
adev->sdma.instance[i].burst_nop = true;
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
info->fw = adev->sdma.instance[i].fw;
header = (const struct common_firmware_header *)info->fw->data;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
}
out:
if (err) {
pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
/**
* sdma_v3_0_ring_get_rptr - get the current read pointer
*
* @ring: amdgpu ring pointer
*
* Get the current rptr from the hardware (VI+).
*/
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
/* XXX check if swapping is necessary on BE */
return *ring->rptr_cpu_addr >> 2;
}
/**
* sdma_v3_0_ring_get_wptr - get the current write pointer
*
* @ring: amdgpu ring pointer
*
* Get the current wptr from the hardware (VI+).
*/
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 wptr;
if (ring->use_doorbell || ring->use_pollmem) {
/* XXX check if swapping is necessary on BE */
wptr = *ring->wptr_cpu_addr >> 2;
} else {
wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
}
return wptr;
}
/**
* sdma_v3_0_ring_set_wptr - commit the write pointer
*
* @ring: amdgpu ring pointer
*
* Write the wptr back to the hardware (VI+).
*/
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
u32 *wb = (u32 *)ring->wptr_cpu_addr;
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, ring->wptr << 2);
WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
} else if (ring->use_pollmem) {
u32 *wb = (u32 *)ring->wptr_cpu_addr;
WRITE_ONCE(*wb, ring->wptr << 2);
} else {
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], ring->wptr << 2);
}
}
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
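/* when burst NOP is supported, the first NOP header encodes the
 * number of trailing NOPs (count - 1); otherwise emit plain NOPs */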
for (i = 0; i < count; i++)
if (sdma && sdma->burst_nop && (i == 0))
amdgpu_ring_write(ring, ring->funcs->nop |
SDMA_PKT_NOP_HEADER_COUNT(count - 1));
else
amdgpu_ring_write(ring, ring->funcs->nop);
}
/**
* sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
* @job: job to retrieve vmid from
* @ib: IB object to schedule
* @flags: unused
*
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_job *job,
struct amdgpu_ib *ib,
uint32_t flags)
{
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* IB packets must end on an 8 DW boundary */
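/* (2 - wptr) & 7 pads the ring so that wptr lands at 2 mod 8; the
 * 6-dword INDIRECT packet emitted below then ends exactly on an
 * 8 DW boundary */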
sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
}
/**
* sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
*
* Emit an hdp flush packet on the requested DMA ring.
*/
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask = 0;
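/* poll the GPU_HDP_FLUSH_DONE bit that belongs to this SDMA instance */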
if (ring->me == 0)
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
else
ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
/**
* sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
* @addr: address
* @seq: sequence number
* @flags: fence related flags
*
* Add a DMA fence packet to the ring to write the fence
* seq number, and a DMA trap packet to generate an
* interrupt if needed (VI).
*/
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
/* optionally write high bits as well */
if (write64bit) {
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
}
/* generate an interrupt */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
* sdma_v3_0_gfx_stop - stop the gfx async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the gfx async dma ring buffers (VI).
*/
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
u32 rb_cntl, ib_cntl;
int i;
amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
}
/**
* sdma_v3_0_rlc_stop - stop the compute async dma engines
*
* @adev: amdgpu_device pointer
*
* Stop the compute async dma queues (VI).
*/
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
/* XXX todo */
}
/**
* sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs context switch.
*
* Halt or unhalt the async dma engines context switch (VI).
*/
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl, phase_quantum = 0;
int i;
if (amdgpu_sdma_phase_quantum) {
unsigned value = amdgpu_sdma_phase_quantum;
unsigned unit = 0;
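/* encode the requested quantum as a (value, unit) pair: halve value
 * (rounding up) and bump the unit exponent until value fits the
 * register field, clamping both if the unit itself overflows */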
while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
value = (value + 1) >> 1;
unit++;
}
if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
WARN_ONCE(1,
"clamping sdma_phase_quantum to %uK clock cycles\n",
value << unit);
}
phase_quantum =
value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
if (enable) {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 1);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
ATC_L1_ENABLE, 1);
if (amdgpu_sdma_phase_quantum) {
WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
phase_quantum);
WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
phase_quantum);
}
} else {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 0);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
ATC_L1_ENABLE, 1);
}
WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
}
}
/**
* sdma_v3_0_enable - enable/disable the async dma engines
*
* @adev: amdgpu_device pointer
* @enable: enable/disable the DMA MEs.
*
* Halt or unhalt the async dma engines (VI).
*/
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
u32 f32_cntl;
int i;
if (!enable) {
sdma_v3_0_gfx_stop(adev);
sdma_v3_0_rlc_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
if (enable)
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
else
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
}
}
/**
* sdma_v3_0_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the gfx DMA ring buffers and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 rb_bufsz;
u32 doorbell;
u64 wptr_gpu_addr;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
vi_srbm_select(adev, 0, 0, 0, j);
/* SDMA GFX */
WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
adev->gfx.config.gb_addr_config & 0x70);
WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
/* Initialize the ring buffer's read and write pointers */
ring->wptr = 0;
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
sdma_v3_0_ring_set_wptr(ring);
WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
OFFSET, ring->doorbell_index);
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
}
WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
/* setup the wptr shadow polling */
wptr_gpu_addr = ring->wptr_gpu_addr;
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
lower_32_bits(wptr_gpu_addr));
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
if (ring->use_pollmem) {
/* wptr polling is not fast enough, so directly clear the wptr register */
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
ENABLE, 1);
} else {
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
ENABLE, 0);
}
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
/* unhalt the MEs */
sdma_v3_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v3_0_ctx_switch_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return 0;
}
/**
* sdma_v3_0_rlc_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the compute DMA queues and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
/* XXX todo */
return 0;
}
/**
* sdma_v3_0_start - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
*
* Set up the DMA engines and enable them (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
int r;
/* disable sdma engine before programming it */
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
if (r)
return r;
r = sdma_v3_0_rlc_resume(adev);
if (r)
return r;
return 0;
}
/**
* sdma_v3_0_ring_test_ring - simple async dma engine test
*
* @ring: amdgpu_ring structure holding ring information
*
* Test the DMA engine by using it to write a value
* to memory (VI).
* Returns 0 for success, error for failure.
*/
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
unsigned i;
unsigned index;
int r;
u32 tmp;
u64 gpu_addr;
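/* grab a writeback dword, seed it with 0xCAFEDEAD, then have the
 * engine overwrite it with 0xDEADBEEF and poll for the result */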
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
if (r)
goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
error_free_wb:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v3_0_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
* @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (VI).
* Returns 0 on success, error on failure.
*/
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
unsigned index;
u32 tmp = 0;
u64 gpu_addr;
long r;
r = amdgpu_device_wb_get(adev, &index);
if (r)
return r;
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256,
AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
ib.ptr[4] = 0xDEADBEEF;
ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
r = 0;
else
r = -EINVAL;
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
amdgpu_device_wb_free(adev, index);
return r;
}
/**
* sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr to copy from
* @count: number of page entries to update
*
* Update PTEs by copying them from the GART using sDMA (VI).
*/
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
unsigned bytes = count * 8;
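/* each PTE is 8 bytes, so a COPY_LINEAR of count * 8 bytes moves
 * all requested entries from the GART source into the page table */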
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = bytes;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(src);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
* sdma_v3_0_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
*
* Update PTEs by writing them manually using sDMA (VI).
*/
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr)
{
unsigned ndw = count * 2;
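/* each 64-bit PTE takes two dwords in the WRITE_LINEAR payload */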
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = ndw;
for (; ndw > 0; ndw -= 2) {
ib->ptr[ib->length_dw++] = lower_32_bits(value);
ib->ptr[ib->length_dw++] = upper_32_bits(value);
value += incr;
}
}
/**
* sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using sDMA (VI).
*/
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags)
{
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
ib->ptr[ib->length_dw++] = upper_32_bits(flags);
ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(addr);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
* sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
*
* @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*
*/
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
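/* pad the IB up to the next multiple of 8 dwords; with burst NOP
 * the first pad entry announces the remaining pad_count - 1 NOPs */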
pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
else
ib->ptr[ib->length_dw++] =
SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
/**
* sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
*
* @ring: amdgpu_ring pointer
*
* Make sure all previous operations are completed (VI).
*/
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
/* wait for idle */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}
/**
* sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
* @vmid: vmid number to use
* @pd_addr: address
*
* Update the page table base and flush the VM TLB
* using sDMA (VI).
*/
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0); /* reference */
amdgpu_ring_write(ring, 0); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, val);
}
static int sdma_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
case CHIP_STONEY:
adev->sdma.num_instances = 1;
break;
default:
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
break;
}
sdma_v3_0_set_ring_funcs(adev);
sdma_v3_0_set_buffer_funcs(adev);
sdma_v3_0_set_vm_pte_funcs(adev);
sdma_v3_0_set_irq_funcs(adev);
return 0;
}
static int sdma_v3_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
int r, i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* SDMA trap event */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
r = sdma_v3_0_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
}
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
if (!amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.sdma_engine[i];
} else {
ring->use_pollmem = true;
}
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
(i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
return r;
}
static int sdma_v3_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
sdma_v3_0_free_microcode(adev);
return 0;
}
static int sdma_v3_0_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v3_0_init_golden_registers(adev);
r = sdma_v3_0_start(adev);
if (r)
return r;
return r;
}
static int sdma_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
return 0;
}
static int sdma_v3_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v3_0_hw_fini(adev);
}
static int sdma_v3_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return sdma_v3_0_hw_init(adev);
}
static bool sdma_v3_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK))
return false;
return true;
}
static int sdma_v3_0_wait_for_idle(void *handle)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
SRBM_STATUS2__SDMA1_BUSY_MASK);
if (!tmp)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static bool sdma_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS2);
if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
(tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
}
if (srbm_soft_reset) {
adev->sdma.srbm_soft_reset = srbm_soft_reset;
return true;
} else {
adev->sdma.srbm_soft_reset = 0;
return false;
}
}
static int sdma_v3_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
}
return 0;
}
static int sdma_v3_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
sdma_v3_0_gfx_resume(adev);
sdma_v3_0_rlc_resume(adev);
}
return 0;
}
static int sdma_v3_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
u32 tmp;
if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
break;
default:
break;
}
break;
default:
break;
}
return 0;
}
static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id, queue_id;
instance_id = (entry->ring_id & 0x3) >> 0;
queue_id = (entry->ring_id & 0xc) >> 2;
DRM_DEBUG("IH: SDMA trap\n");
switch (instance_id) {
case 0:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[0].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
case 1:
switch (queue_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[1].ring);
break;
case 1:
/* XXX compute */
break;
case 2:
/* XXX compute */
break;
}
break;
}
return 0;
}
static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
u8 instance_id, queue_id;
DRM_ERROR("Illegal instruction in SDMA command stream\n");
instance_id = (entry->ring_id & 0x3) >> 0;
queue_id = (entry->ring_id & 0xc) >> 2;
if (instance_id <= 1 && queue_id == 0)
drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
int i;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (data != temp)
WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
if (data != temp)
WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
}
}
}
static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
int i;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
}
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
}
}
}
static int sdma_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->asic_type) {
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
static int sdma_v3_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
if (amdgpu_sriov_vf(adev))
*flags = 0;
/* AMD_CG_SUPPORT_SDMA_MGCG */
data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
/* AMD_CG_SUPPORT_SDMA_LS */
data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
.sw_init = sdma_v3_0_sw_init,
.sw_fini = sdma_v3_0_sw_fini,
.hw_init = sdma_v3_0_hw_init,
.hw_fini = sdma_v3_0_hw_fini,
.suspend = sdma_v3_0_suspend,
.resume = sdma_v3_0_resume,
.is_idle = sdma_v3_0_is_idle,
.wait_for_idle = sdma_v3_0_wait_for_idle,
.check_soft_reset = sdma_v3_0_check_soft_reset,
.pre_soft_reset = sdma_v3_0_pre_soft_reset,
.post_soft_reset = sdma_v3_0_post_soft_reset,
.soft_reset = sdma_v3_0_soft_reset,
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
.get_clockgating_state = sdma_v3_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_SDMA,
.align_mask = 0xf,
.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
.support_64bit_ptrs = false,
.secure_submission_supported = true,
.get_rptr = sdma_v3_0_ring_get_rptr,
.get_wptr = sdma_v3_0_ring_get_wptr,
.set_wptr = sdma_v3_0_ring_set_wptr,
.emit_frame_size =
6 + /* sdma_v3_0_ring_emit_hdp_flush */
3 + /* hdp invalidate */
6 + /* sdma_v3_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */
10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
.test_ring = sdma_v3_0_ring_test_ring,
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
.pad_ib = sdma_v3_0_ring_pad_ib,
.emit_wreg = sdma_v3_0_ring_emit_wreg,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
}
}
static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
.set = sdma_v3_0_set_trap_irq_state,
.process = sdma_v3_0_process_trap_irq,
};
static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
.process = sdma_v3_0_process_illegal_inst_irq,
};
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}
/**
* sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
* @tmz: unused
*
* Copy GPU buffers using the DMA engine (VI).
* Used by the amdgpu ttm implementation to move pages if
* registered as the asic copy callback.
*/
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
uint32_t byte_count,
bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
ib->ptr[ib->length_dw++] = byte_count;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
* sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
*
* @ib: indirect buffer to copy to
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
*
* Fill GPU buffers using the DMA engine (VI).
*/
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
uint32_t src_data,
uint64_t dst_offset,
uint32_t byte_count)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
ib->ptr[ib->length_dw++] = src_data;
ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
.copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
.fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = sdma_v3_0_vm_copy_pte,
.write_pte = sdma_v3_0_vm_write_pte,
.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->vm_manager.vm_pte_scheds[i] =
&adev->sdma.instance[i].ring.sched;
}
adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 3,
.minor = 0,
.rev = 0,
.funcs = &sdma_v3_0_ip_funcs,
};
const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_SDMA,
.major = 3,
.minor = 1,
.rev = 0,
.funcs = &sdma_v3_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c |
/*
* Copyright (C) 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "amdgpu.h"
#include "amdgpu_ras.h"
int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
{
int err;
struct amdgpu_nbio_ras *ras;
if (!adev->nbio.ras)
return 0;
ras = adev->nbio.ras;
err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
if (err) {
dev_err(adev->dev, "Failed to register pcie_bif ras block!\n");
return err;
}
strcpy(ras->ras_block.ras_comm.name, "pcie_bif");
ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF;
ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->nbio.ras_if = &ras->ras_block.ras_comm;
return 0;
}
u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
{
if (adev->nbio.funcs && adev->nbio.funcs->get_pcie_replay_count)
return adev->nbio.funcs->get_pcie_replay_count(adev);
return 0;
}
void amdgpu_nbio_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
if (adev->nbio.funcs->get_pcie_usage)
adev->nbio.funcs->get_pcie_usage(adev, count0, count1);
}
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
return r;
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
if (r)
goto late_fini;
r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
if (r)
goto late_fini;
}
return 0;
late_fini:
amdgpu_ras_block_late_fini(adev, ras_block);
return r;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
#include "oss/oss_3_0_1_d.h"
#include "oss/oss_3_0_1_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "bif/bif_5_1_sh_mask.h"
/*
* Interrupts
* Starting with r6xx, interrupts are handled via a ring buffer.
* Ring buffers are areas of GPU accessible memory that the GPU
* writes interrupt vectors into and the host reads vectors out of.
* There is a rptr (read pointer) that determines where the
* host is currently reading, and a wptr (write pointer)
* which determines where the GPU has written. When the
* pointers are equal, the ring is idle. When the GPU
* writes vectors to the ring buffer, it increments the
* wptr. When there is an interrupt, the host then starts
* fetching commands and processing them until the pointers are
* equal again at which point it updates the rptr.
*/
static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
* cz_ih_enable_interrupts - Enable the interrupt ring buffer
*
* @adev: amdgpu_device pointer
*
* Enable the interrupt ring buffer (VI).
*/
static void cz_ih_enable_interrupts(struct amdgpu_device *adev)
{
u32 ih_cntl = RREG32(mmIH_CNTL);
u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
WREG32(mmIH_CNTL, ih_cntl);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
adev->irq.ih.enabled = true;
}
/**
* cz_ih_disable_interrupts - Disable the interrupt ring buffer
*
* @adev: amdgpu_device pointer
*
* Disable the interrupt ring buffer (VI).
*/
static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
{
u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL);
u32 ih_cntl = RREG32(mmIH_CNTL);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
WREG32(mmIH_CNTL, ih_cntl);
/* set rptr, wptr to 0 */
WREG32(mmIH_RB_RPTR, 0);
WREG32(mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
}
/**
* cz_ih_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
*
* Allocate a ring buffer for the interrupt controller,
* enable the RLC, disable interrupts, set up the IH
* ring buffer and enable it (VI).
* Called at device load and resume.
* Returns 0 for success, errors for failure.
*/
static int cz_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
int rb_bufsz;
/* disable irqs */
cz_ih_disable_interrupts(adev);
/* setup interrupt control */
WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
WREG32(mmINTERRUPT_CNTL, interrupt_cntl);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8);
rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
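/* RB_SIZE is the log2 of the ring size in dwords */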
ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
/* set the writeback address whether it's enabled or not */
WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
/* set rptr, wptr to 0 */
WREG32(mmIH_RB_RPTR, 0);
WREG32(mmIH_RB_WPTR, 0);
/* Default settings for IH_CNTL (disabled at first) */
ih_cntl = RREG32(mmIH_CNTL);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0);
if (adev->irq.msi_enabled)
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1);
WREG32(mmIH_CNTL, ih_cntl);
pci_set_master(adev->pdev);
/* enable interrupts */
cz_ih_enable_interrupts(adev);
return 0;
}
/**
* cz_ih_irq_disable - disable interrupts
*
* @adev: amdgpu_device pointer
*
* Disable interrupts on the hw (VI).
*/
static void cz_ih_irq_disable(struct amdgpu_device *adev)
{
cz_ih_disable_interrupts(adev);
/* Wait and acknowledge irq */
mdelay(1);
}
/**
* cz_ih_get_wptr - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to fetch wptr
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer (VI). Also check for
* ring buffer overflow and deal with it.
* Used by cz_irq_process(VI).
* Returns the value of the wptr.
*/
static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
wptr = le32_to_cpu(*ih->wptr_cpu);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32(mmIH_RB_WPTR);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
/* When a ring buffer overflow happens, start parsing interrupts
 * from the last vector that was not overwritten (wptr + 16).
 * Hopefully this should allow us to catch up.
 */
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to decode
* @entry: IV entry to place decoded information into
*
* Decodes the interrupt vector at the current rptr
* position and also advance the position.
*/
static void cz_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
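/* each IV entry is four dwords: dw0[7:0] = src_id, dw1[27:0] =
 * src_data, dw2[7:0] = ring_id, dw2[15:8] = vmid and
 * dw2[31:16] = pasid */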
dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
entry->vmid = (dw[2] >> 8) & 0xff;
entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
ih->rptr += 16;
}
/**
* cz_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
* @ih: IH ring buffer to set rptr
*
* Set the IH ring buffer rptr.
*/
static void cz_ih_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
WREG32(mmIH_RB_RPTR, ih->rptr);
}
static int cz_ih_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
ret = amdgpu_irq_add_domain(adev);
if (ret)
return ret;
cz_ih_set_interrupt_funcs(adev);
return 0;
}
static int cz_ih_sw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
r = amdgpu_irq_init(adev);
return r;
}
static int cz_ih_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
}
static int cz_ih_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = cz_ih_irq_init(adev);
if (r)
return r;
return 0;
}
static int cz_ih_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cz_ih_irq_disable(adev);
return 0;
}
static int cz_ih_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return cz_ih_hw_fini(adev);
}
static int cz_ih_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return cz_ih_hw_init(adev);
}
static bool cz_ih_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
return false;
return true;
}
static int cz_ih_wait_for_idle(void *handle)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(mmSRBM_STATUS);
if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int cz_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
udelay(50);
tmp &= ~srbm_soft_reset;
WREG32(mmSRBM_SOFT_RESET, tmp);
tmp = RREG32(mmSRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
}
return 0;
}
static int cz_ih_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
// TODO
return 0;
}
static int cz_ih_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
// TODO
return 0;
}
static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
.sw_init = cz_ih_sw_init,
.sw_fini = cz_ih_sw_fini,
.hw_init = cz_ih_hw_init,
.hw_fini = cz_ih_hw_fini,
.suspend = cz_ih_suspend,
.resume = cz_ih_resume,
.is_idle = cz_ih_is_idle,
.wait_for_idle = cz_ih_wait_for_idle,
.soft_reset = cz_ih_soft_reset,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
};
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
adev->irq.ih_funcs = &cz_ih_funcs;
}
const struct amdgpu_ip_block_version cz_ih_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 3,
.minor = 0,
.rev = 0,
.funcs = &cz_ih_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/cz_ih.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kernel.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"
#include "gc/gc_9_4_1_offset.h"
#include "gc/gc_9_4_1_sh_mask.h"
#include "soc15_common.h"
#include "gfx_v9_4.h"
#include "amdgpu_ras.h"
static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = {
/* CPC */
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 },
/* DC */
{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 },
/* CPF */
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 },
/* GDS */
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
/* SPI */
{ SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 },
/* SQ */
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 8, 16 },
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 8, 16 },
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 8, 16 },
{ SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 8, 16 },
/* SQC */
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 },
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 },
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 },
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 },
/* TA */
{ SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 },
/* TCA */
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 },
/* TCC */
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 },
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 },
/* TCI */
{ SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 },
/* TCP */
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 },
{ SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 },
{ SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 },
/* TD */
{ SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 },
/* GCEA */
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 },
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 },
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 },
/* RLC */
{ SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 },
};
static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 instance)
{
u32 data;
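/* an index of 0xffffffff requests broadcast writes to every
 * instance/SE/SH; any other value selects that specific index */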
if (instance == 0xffffffff)
data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
instance);
if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
if (sh_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
}
static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = {
/* CPC */
{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
{ "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
{ "CPC_DC_CSINVOC_RAM_ME1",
SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
{ "CPC_DC_RESTORE_RAM_ME1",
SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
{ "CPC_DC_CSINVOC_RAM1_ME1",
SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
{ "CPC_DC_RESTORE_RAM1_ME1",
SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
/* CPF */
{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
{ "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
/* GDS */
{ "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT),
SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
{ "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
{ "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE0_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE1_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE2_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE3_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
/* SPI */
{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
{ "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
{ "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
{ "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) },
{ "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
/* SQ */
{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
/* SQC */
{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
{ "SQC_INST_BANKA_UTCL1_MISS_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
{ "SQC_INST_BANKA_MISS_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
INST_BANKA_MISS_FIFO_DED_COUNT) },
{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
{ "SQC_DATA_BANKA_HIT_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKA_MISS_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
DATA_BANKA_MISS_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
{ "SQC_INST_BANKB_UTCL1_MISS_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
{ "SQC_INST_BANKB_MISS_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
INST_BANKB_MISS_FIFO_DED_COUNT) },
{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
{ "SQC_DATA_BANKB_HIT_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKB_MISS_FIFO",
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3,
DATA_BANKB_MISS_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
/* TA */
{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
{ "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) },
{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
/* TCA */
{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
/* TCC */
{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
/* TCI */
{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
/* TCP */
{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 },
{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
/* TD */
{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
/* EA */
{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
{ "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
{ "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
{ "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 },
{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) },
{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
{ "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
{ "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
{ "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
{ "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
{ "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
{ "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
{ "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
{ "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 },
{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
/* RLC */
{ "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
{ "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
{ "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
{ "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
{ "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
{ "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
{ "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
{ "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
};
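/* Memory instance names for the UTC blocks below, indexed by the value
 * written to the corresponding *_ECC_INDEX / *_DSM_INDEX register; used
 * only for error reporting.
 */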
static const char * const vml2_mems[] = {
"UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_0_4K_MEM0",
"UTC_VML2_BANK_CACHE_0_4K_MEM1",
"UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_1_4K_MEM0",
"UTC_VML2_BANK_CACHE_1_4K_MEM1",
"UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_2_4K_MEM0",
"UTC_VML2_BANK_CACHE_2_4K_MEM1",
"UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
"UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
"UTC_VML2_BANK_CACHE_3_4K_MEM0",
"UTC_VML2_BANK_CACHE_3_4K_MEM1",
"UTC_VML2_IFIFO_GROUP0",
"UTC_VML2_IFIFO_GROUP1",
"UTC_VML2_IFIFO_GROUP2",
"UTC_VML2_IFIFO_GROUP3",
"UTC_VML2_IFIFO_GROUP4",
"UTC_VML2_IFIFO_GROUP5",
"UTC_VML2_IFIFO_GROUP6",
"UTC_VML2_IFIFO_GROUP7",
"UTC_VML2_IFIFO_GROUP8",
"UTC_VML2_IFIFO_GROUP9",
"UTC_VML2_IFIFO_GROUP10",
"UTC_VML2_IFIFO_GROUP11",
"UTC_VML2_IFIFO_GROUP12",
"UTC_VML2_IFIFO_GROUP13",
"UTC_VML2_IFIFO_GROUP14",
"UTC_VML2_IFIFO_GROUP15",
"UTC_VML2_IFIFO_GROUP16",
"UTC_VML2_IFIFO_GROUP17",
"UTC_VML2_IFIFO_GROUP18",
"UTC_VML2_IFIFO_GROUP19",
"UTC_VML2_IFIFO_GROUP20",
"UTC_VML2_IFIFO_GROUP21",
"UTC_VML2_IFIFO_GROUP22",
"UTC_VML2_IFIFO_GROUP23",
"UTC_VML2_IFIFO_GROUP24",
};
static const char * const vml2_walker_mems[] = {
"UTC_VML2_CACHE_PDE0_MEM0",
"UTC_VML2_CACHE_PDE0_MEM1",
"UTC_VML2_CACHE_PDE1_MEM0",
"UTC_VML2_CACHE_PDE1_MEM1",
"UTC_VML2_CACHE_PDE2_MEM0",
"UTC_VML2_CACHE_PDE2_MEM1",
"UTC_VML2_RDIF_ARADDRS",
"UTC_VML2_RDIF_LOG_FIFO",
"UTC_VML2_QUEUE_REQ",
"UTC_VML2_QUEUE_RET",
};
static const char * const utcl2_router_mems[] = {
"UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0",
"UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1",
"UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2",
"UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3",
"UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4",
"UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5",
"UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6",
"UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7",
"UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8",
"UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9",
"UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10",
"UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11",
"UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12",
"UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13",
"UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14",
"UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15",
"UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16",
"UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17",
"UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18",
"UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19",
"UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20",
"UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21",
"UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22",
"UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23",
"UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24",
};
static const char * const atc_l2_cache_2m_mems[] = {
"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
"UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
"UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
"UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
};
static const char * const atc_l2_cache_4k_mems[] = {
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
"UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
"UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
"UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
};
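/* Walk the VML2, VML2 walker, UTCL2 router and ATC L2 memory instances,
 * read their ECC/DSM counter registers and accumulate SEC counts as
 * correctable and DED counts as uncorrectable errors in err_data.
 */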
static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
struct ras_err_data *err_data)
{
uint32_t i, data;
uint32_t sec_count, ded_count;
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
SEC_COUNT);
if (sec_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
DED_COUNT);
if (ded_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, SEC %d\n", i,
utcl2_router_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, DED %d\n", i,
utcl2_router_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_2m_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
DED_COUNT);
if (ded_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_2m_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_4k_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
DED_COUNT);
if (ded_count) {
dev_info(adev->dev,
"Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_4k_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
return 0;
}
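/* Decode one EDC counter value: add the SEC/DED counts of every
 * gfx_v9_4_ras_fields entry that refers to this register to the running
 * totals.
 */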
static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id,
uint32_t value, uint32_t *sec_count,
uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) {
if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset ||
gfx_v9_4_ras_fields[i].seg != reg->seg ||
gfx_v9_4_ras_fields[i].inst != reg->inst)
continue;
sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
gfx_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
dev_info(adev->dev,
"GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
gfx_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
dev_info(adev->dev,
"GFX SubBlock %s, Instance[%d][%d], DED %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
}
}
return 0;
}
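/* Read every EDC counter register across all shader engines and instances
 * under grbm_idx_mutex, fold the decoded SEC/DED totals into the RAS error
 * data, then append the UTC EDC status.
 */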
static void gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
uint32_t i, j, k;
uint32_t reg_value;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
err_data->ue_count = 0;
err_data->ce_count = 0;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
k++) {
gfx_v9_4_select_se_sh(adev, j, 0, k);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_edc_counter_regs[i]));
if (reg_value)
gfx_v9_4_ras_error_count(adev,
&gfx_v9_4_edc_counter_regs[i],
j, k, reg_value, &sec_count,
&ded_count);
}
}
}
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_4_query_utc_edc_status(adev, err_data);
}
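/* The EDC counters are cleared on read, so resetting them amounts to
 * selecting each instance and reading its counter register back, then
 * restoring broadcast GRBM_GFX_INDEX.
 */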
static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance;
k++) {
gfx_v9_4_select_se_sh(adev, j, 0x0, k);
RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_edc_counter_regs[i]));
}
}
}
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
mutex_unlock(&adev->grbm_idx_mutex);
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0);
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0);
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0);
for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i);
RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL);
}
for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i);
RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL);
}
for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) {
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i);
RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL);
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i);
RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL);
}
for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i);
RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL);
}
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255);
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
}
static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
{
uint32_t i, j;
uint32_t reg_value;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < gfx_v9_4_ea_err_status_regs.se_num; i++) {
for (j = 0; j < gfx_v9_4_ea_err_status_regs.instance;
j++) {
gfx_v9_4_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_ea_err_status_regs));
if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
/* An SDP read/write error or parity error in FUE_IS_FATAL mode
* can cause a system fatal error on Arcturus. Harvest the error
* status before GPU reset */
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
}
}
}
gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = {
.query_ras_error_count = &gfx_v9_4_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
.query_ras_error_status = &gfx_v9_4_query_ras_error_status,
};
struct amdgpu_gfx_ras gfx_v9_4_ras = {
.ras_block = {
.hw_ops = &gfx_v9_4_ras_ops,
},
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c |
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "oss/osssys_6_1_0_offset.h"
#include "oss/osssys_6_1_0_sh_mask.h"
#include "soc15_common.h"
#include "ih_v6_1.h"
#define MAX_REARM_RETRY 10
static void ih_v6_1_set_interrupt_funcs(struct amdgpu_device *adev);
/**
* ih_v6_1_init_register_offset - Initialize register offset for ih rings
*
* @adev: amdgpu_device pointer
*
* Initialize register offsets for the ih rings (IH_V6_1).
*/
static void ih_v6_1_init_register_offset(struct amdgpu_device *adev)
{
struct amdgpu_ih_regs *ih_regs;
/* ih ring 2 is removed;
* only ih ring 0 and ih ring 1 are available */
if (adev->irq.ih.ring_size) {
ih_regs = &adev->irq.ih.ih_regs;
ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE);
ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI);
ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL);
ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR);
ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR);
ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR);
ih_regs->ih_rb_wptr_addr_lo = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_LO);
ih_regs->ih_rb_wptr_addr_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_ADDR_HI);
ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL;
}
if (adev->irq.ih1.ring_size) {
ih_regs = &adev->irq.ih1.ih_regs;
ih_regs->ih_rb_base = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_RING1);
ih_regs->ih_rb_base_hi = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_BASE_HI_RING1);
ih_regs->ih_rb_cntl = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_CNTL_RING1);
ih_regs->ih_rb_wptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_WPTR_RING1);
ih_regs->ih_rb_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RB_RPTR_RING1);
ih_regs->ih_doorbell_rptr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_DOORBELL_RPTR_RING1);
ih_regs->psp_reg_id = PSP_REG_IH_RB_CNTL_RING1;
}
}
/**
* force_update_wptr_for_self_int - Force update the wptr for self interrupt
*
* @adev: amdgpu_device pointer
* @threshold: threshold to trigger the wptr reporting
* @timeout: timeout to trigger the wptr reporting
* @enabled: Enable/disable timeout flush mechanism
*
* threshold input range: 0 ~ 15, default 0,
* real_threshold = 2^threshold
* timeout input range: 0 ~ 20, default 8,
* real_timeout = (2^timeout) * 1024 / (socclk_freq)
*
* Force update wptr for self interrupt ( >= SIENNA_CICHLID).
*/
static void
force_update_wptr_for_self_int(struct amdgpu_device *adev,
u32 threshold, u32 timeout, bool enabled)
{
u32 ih_cntl, ih_rb_cntl;
ih_cntl = RREG32_SOC15(OSSSYS, 0, regIH_CNTL2);
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
SELF_IV_FORCE_WPTR_UPDATE_TIMEOUT, timeout);
ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL2,
SELF_IV_FORCE_WPTR_UPDATE_ENABLE, enabled);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_USED_INT_THRESHOLD, threshold);
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl))
return;
} else {
WREG32_SOC15(OSSSYS, 0, regIH_RB_CNTL_RING1, ih_rb_cntl);
}
WREG32_SOC15(OSSSYS, 0, regIH_CNTL2, ih_cntl);
}
/**
* ih_v6_1_toggle_ring_interrupts - toggle the interrupt ring buffer
*
* @adev: amdgpu_device pointer
* @ih: amdgpu_ih_ring pointer
* @enable: true - enable the interrupts, false - disable the interrupts
*
* Toggle the interrupt ring buffer (IH_V6_1)
*/
static int ih_v6_1_toggle_ring_interrupts(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
bool enable)
{
struct amdgpu_ih_regs *ih_regs;
uint32_t tmp;
ih_regs = &ih->ih_regs;
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp))
return -ETIMEDOUT;
} else {
WREG32(ih_regs->ih_rb_cntl, tmp);
}
if (enable) {
ih->enabled = true;
} else {
/* set rptr, wptr to 0 */
WREG32(ih_regs->ih_rb_rptr, 0);
WREG32(ih_regs->ih_rb_wptr, 0);
ih->enabled = false;
ih->rptr = 0;
}
return 0;
}
/**
* ih_v6_1_toggle_interrupts - Toggle all the available interrupt ring buffers
*
* @adev: amdgpu_device pointer
* @enable: enable or disable interrupt ring buffers
*
* Toggle all the available interrupt ring buffers (IH_V6_1).
*/
static int ih_v6_1_toggle_interrupts(struct amdgpu_device *adev, bool enable)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
int i;
int r;
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
r = ih_v6_1_toggle_ring_interrupts(adev, ih[i], enable);
if (r)
return r;
}
}
return 0;
}
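/* Build the IH_RB_CNTL value for a ring: ring size, MC access attributes,
 * write-pointer overflow handling and write-pointer writeback to memory.
 */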
static uint32_t ih_v6_1_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
{
int rb_bufsz = order_base_2(ih->ring_size / 4);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
MC_SPACE, ih->use_bus_addr ? 2 : 4);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
WPTR_OVERFLOW_CLEAR, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
WPTR_OVERFLOW_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
/* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register
* value is written to memory
*/
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
WPTR_WRITEBACK_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
return ih_rb_cntl;
}
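/* Build the IH_DOORBELL_RPTR value: enable the doorbell at the ring's
 * doorbell index when doorbells are in use, otherwise leave it disabled.
 */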
static uint32_t ih_v6_1_doorbell_rptr(struct amdgpu_ih_ring *ih)
{
u32 ih_doorbell_rtpr = 0;
if (ih->use_doorbell) {
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
IH_DOORBELL_RPTR, OFFSET,
ih->doorbell_index);
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
IH_DOORBELL_RPTR,
ENABLE, 1);
} else {
ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
IH_DOORBELL_RPTR,
ENABLE, 0);
}
return ih_doorbell_rtpr;
}
/**
* ih_v6_1_enable_ring - enable an ih ring buffer
*
* @adev: amdgpu_device pointer
* @ih: amdgpu_ih_ring pointer
*
* Enable an ih ring buffer (IH_V6_1)
*/
static int ih_v6_1_enable_ring(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
struct amdgpu_ih_regs *ih_regs;
uint32_t tmp;
ih_regs = &ih->ih_regs;
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
WREG32(ih_regs->ih_rb_base, ih->gpu_addr >> 8);
WREG32(ih_regs->ih_rb_base_hi, (ih->gpu_addr >> 40) & 0xff);
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = ih_v6_1_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
if (ih == &adev->irq.ih1) {
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT;
}
} else {
WREG32(ih_regs->ih_rb_cntl, tmp);
}
if (ih == &adev->irq.ih) {
/* set the ih ring 0 writeback address whether it's enabled or not */
WREG32(ih_regs->ih_rb_wptr_addr_lo, lower_32_bits(ih->wptr_addr));
WREG32(ih_regs->ih_rb_wptr_addr_hi, upper_32_bits(ih->wptr_addr) & 0xFFFF);
}
/* set rptr, wptr to 0 */
WREG32(ih_regs->ih_rb_wptr, 0);
WREG32(ih_regs->ih_rb_rptr, 0);
WREG32(ih_regs->ih_doorbell_rptr, ih_v6_1_doorbell_rptr(ih));
return 0;
}
/**
* ih_v6_1_irq_init - init and enable the interrupt ring
*
* @adev: amdgpu_device pointer
*
* Allocate a ring buffer for the interrupt controller,
* enable the RLC, disable interrupts, enable the IH
* ring buffer and enable it.
* Called at device load and resume.
* Returns 0 for success, errors for failure.
*/
static int ih_v6_1_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1};
u32 ih_chicken;
u32 tmp;
int ret;
int i;
/* disable irqs */
ret = ih_v6_1_toggle_interrupts(adev, false);
if (ret)
return ret;
adev->nbio.funcs->ih_control(adev);
if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
if (ih[0]->use_bus_addr) {
ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
ih_chicken = REG_SET_FIELD(ih_chicken,
IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
}
}
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
ret = ih_v6_1_enable_ring(adev, ih[i]);
if (ret)
return ret;
}
}
/* update doorbell range for ih ring 0 */
adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
ih[0]->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
WREG32_SOC15(OSSSYS, 0, regIH_STORM_CLIENT_LIST_CNTL, tmp);
tmp = RREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL);
tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, regIH_INT_FLOOD_CNTL, tmp);
/* GC/MMHUB UTCL2 page fault interrupts are configured as
* MSI storm capable interrupts by default. The delay is
* used to avoid the ISR being called too frequently
* when page faults happen on several contiguous pages,
* and thus avoid an MSI storm */
tmp = RREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL);
tmp = REG_SET_FIELD(tmp, IH_MSI_STORM_CTRL,
DELAY, 3);
WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp);
pci_set_master(adev->pdev);
/* enable interrupts */
ret = ih_v6_1_toggle_interrupts(adev, true);
if (ret)
return ret;
/* enable wptr force update for self int */
force_update_wptr_for_self_int(adev, 0, 8, true);
if (adev->irq.ih_soft.ring_size)
adev->irq.ih_soft.enabled = true;
return 0;
}
/**
* ih_v6_1_irq_disable - disable interrupts
*
* @adev: amdgpu_device pointer
*
* Disable interrupts on the hw.
*/
static void ih_v6_1_irq_disable(struct amdgpu_device *adev)
{
force_update_wptr_for_self_int(adev, 0, 8, false);
ih_v6_1_toggle_interrupts(adev, false);
/* Wait and acknowledge irq */
mdelay(1);
}
/**
* ih_v6_1_get_wptr - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
* @ih: amdgpu_ih_ring pointer
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer. Also check for
* ring buffer overflow and deal with it.
* Returns the value of the wptr.
*/
static u32 ih_v6_1_get_wptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
/* When a ring buffer overflow happen start parsing interrupt
* from the last not overwritten vector (wptr + 32). Hopefully
* this should allow us to catch up.
*/
tmp = (wptr + 32) & ih->ptr_mask;
dev_warn(adev->dev, "IH ring buffer overflow "
"(0x%08X, 0x%08X, 0x%08X)\n",
wptr, ih->rptr, tmp);
ih->rptr = tmp;
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
/**
* ih_v6_1_irq_rearm - rearm IRQ if lost
*
* @adev: amdgpu_device pointer
* @ih: amdgpu_ih_ring pointer
*
*/
static void ih_v6_1_irq_rearm(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
uint32_t v = 0;
uint32_t i = 0;
struct amdgpu_ih_regs *ih_regs;
ih_regs = &ih->ih_regs;
/* Rearm IRQ / re-write doorbell if doorbell write is lost */
for (i = 0; i < MAX_REARM_RETRY; i++) {
v = RREG32_NO_KIQ(ih_regs->ih_rb_rptr);
if ((v < ih->ring_size) && (v != ih->rptr))
WDOORBELL32(ih->doorbell_index, ih->rptr);
else
break;
}
}
/**
* ih_v6_1_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
* @ih: amdgpu_ih_ring pointer
*
* Set the IH ring buffer rptr.
*/
static void ih_v6_1_set_rptr(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih)
{
struct amdgpu_ih_regs *ih_regs;
if (ih->use_doorbell) {
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
if (amdgpu_sriov_vf(adev))
ih_v6_1_irq_rearm(adev, ih);
} else {
ih_regs = &ih->ih_regs;
WREG32(ih_regs->ih_rb_rptr, ih->rptr);
}
}
/**
* ih_v6_1_self_irq - dispatch work for ring 1
*
* @adev: amdgpu_device pointer
* @source: irq source
* @entry: IV with WPTR update
*
* Update the WPTR from the IV and schedule work to handle the entries.
*/
static int ih_v6_1_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t wptr = cpu_to_le32(entry->src_data[0]);
switch (entry->ring_id) {
case 1:
*adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
default:
break;
}
return 0;
}
static const struct amdgpu_irq_src_funcs ih_v6_1_self_irq_funcs = {
.process = ih_v6_1_self_irq,
};
static void ih_v6_1_set_self_irq_funcs(struct amdgpu_device *adev)
{
adev->irq.self_irq.num_types = 0;
adev->irq.self_irq.funcs = &ih_v6_1_self_irq_funcs;
}
static int ih_v6_1_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ih_v6_1_set_interrupt_funcs(adev);
ih_v6_1_set_self_irq_funcs(adev);
return 0;
}
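/* Software init: register the self-interrupt source, allocate the 256KB
 * doorbell-based main IH ring plus the software ring, leave IH ring 1 and
 * ring 2 disabled, and initialize the IRQ framework.
 */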
static int ih_v6_1_sw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool use_bus_addr;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_IH, 0,
&adev->irq.self_irq);
if (r)
return r;
/* use gpu virtual address for ih ring
* until ih_chicken is programmed to allow
* the use of a bus address for the ih ring by the psp bootloader */
use_bus_addr =
(adev->firmware.load_type != AMDGPU_FW_LOAD_PSP);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
adev->irq.ih1.ring_size = 0;
adev->irq.ih2.ring_size = 0;
/* initialize ih control register offset */
ih_v6_1_init_register_offset(adev);
r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, PAGE_SIZE, true);
if (r)
return r;
r = amdgpu_irq_init(adev);
return r;
}
static int ih_v6_1_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini_sw(adev);
return 0;
}
static int ih_v6_1_hw_init(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = ih_v6_1_irq_init(adev);
if (r)
return r;
return 0;
}
static int ih_v6_1_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ih_v6_1_irq_disable(adev);
return 0;
}
static int ih_v6_1_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return ih_v6_1_hw_fini(adev);
}
static int ih_v6_1_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return ih_v6_1_hw_init(adev);
}
static bool ih_v6_1_is_idle(void *handle)
{
/* todo */
return true;
}
static int ih_v6_1_wait_for_idle(void *handle)
{
/* todo */
return -ETIMEDOUT;
}
static int ih_v6_1_soft_reset(void *handle)
{
/* todo */
return 0;
}
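/* Toggle IH clock gating by programming the clock soft-override bits in
 * IH_CLK_CTRL; enabling CG clears the overrides, disabling sets them.
 */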
static void ih_v6_1_update_clockgating_state(struct amdgpu_device *adev,
bool enable)
{
uint32_t data, def, field_val;
if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) {
def = data = RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL);
field_val = enable ? 0 : 1;
data = REG_SET_FIELD(data, IH_CLK_CTRL,
DBUS_MUX_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
DYN_CLK_SOFT_OVERRIDE, field_val);
data = REG_SET_FIELD(data, IH_CLK_CTRL,
REG_CLK_SOFT_OVERRIDE, field_val);
if (def != data)
WREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL, data);
}
return;
}
static int ih_v6_1_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ih_v6_1_update_clockgating_state(adev,
state == AMD_CG_STATE_GATE);
return 0;
}
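/* Reconfigure IH SRAM power gating: power control is disabled first, the
 * IH buffer and retry-interrupt CAM memories are put into deep-sleep (DS)
 * mode when enabling (all modes cleared when disabling), and power control
 * is then re-enabled.
 */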
static void ih_v6_1_update_ih_mem_power_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t ih_mem_pwr_cntl;
/* Disable ih sram power cntl before switch powergating mode */
ih_mem_pwr_cntl = RREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_CTRL_EN, 0);
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
/* It is recommended to set mem powergating mode to DS mode */
if (enable) {
/* mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_DS_EN, 1);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_SD_EN, 0);
/* cam mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 1);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
/* re-enable power cntl */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_CTRL_EN, 1);
} else {
/* mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_DS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_SD_EN, 0);
/* cam mem power mode */
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_LS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_DS_EN, 0);
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_RETRY_INT_CAM_MEM_POWER_SD_EN, 0);
/* re-enable power cntl*/
ih_mem_pwr_cntl = REG_SET_FIELD(ih_mem_pwr_cntl, IH_MEM_POWER_CTRL,
IH_BUFFER_MEM_POWER_CTRL_EN, 1);
}
WREG32_SOC15(OSSSYS, 0, regIH_MEM_POWER_CTRL, ih_mem_pwr_cntl);
}
static int ih_v6_1_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
if (adev->pg_flags & AMD_PG_SUPPORT_IH_SRAM_PG)
ih_v6_1_update_ih_mem_power_gating(adev, enable);
return 0;
}
static void ih_v6_1_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!RREG32_SOC15(OSSSYS, 0, regIH_CLK_CTRL))
*flags |= AMD_CG_SUPPORT_IH_CG;
return;
}
static const struct amd_ip_funcs ih_v6_1_ip_funcs = {
.name = "ih_v6_1",
.early_init = ih_v6_1_early_init,
.late_init = NULL,
.sw_init = ih_v6_1_sw_init,
.sw_fini = ih_v6_1_sw_fini,
.hw_init = ih_v6_1_hw_init,
.hw_fini = ih_v6_1_hw_fini,
.suspend = ih_v6_1_suspend,
.resume = ih_v6_1_resume,
.is_idle = ih_v6_1_is_idle,
.wait_for_idle = ih_v6_1_wait_for_idle,
.soft_reset = ih_v6_1_soft_reset,
.set_clockgating_state = ih_v6_1_set_clockgating_state,
.set_powergating_state = ih_v6_1_set_powergating_state,
.get_clockgating_state = ih_v6_1_get_clockgating_state,
};
static const struct amdgpu_ih_funcs ih_v6_1_funcs = {
.get_wptr = ih_v6_1_get_wptr,
.decode_iv = amdgpu_ih_decode_iv_helper,
.decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
.set_rptr = ih_v6_1_set_rptr
};
static void ih_v6_1_set_interrupt_funcs(struct amdgpu_device *adev)
{
adev->irq.ih_funcs = &ih_v6_1_funcs;
}
const struct amdgpu_ip_block_version ih_v6_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 6,
.minor = 0,
.rev = 0,
.funcs = &ih_v6_1_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/ih_v6_1.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"
#include "amdgpu_reset.h"
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}
/*
 * this peek_msg could *only* be called in an IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL must already
 * have been set to 1 by the host.
 *
 * if not called in an IRQ routine, this peek_msg cannot be guaranteed to
 * return the correct value, since it reads RCV_DW0 without checking that
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
u32 reg;
reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)
return -ENOENT;
xgpu_nv_mailbox_send_ack(adev);
return 0;
}
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
u8 reg;
do {
reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
if (reg & 2)
return 0;
mdelay(5);
timeout -= 5;
} while (timeout > 1);
pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);
return -ETIME;
}
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r;
uint64_t timeout, now;
now = (uint64_t)ktime_to_ms(ktime_get());
timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
do {
r = xgpu_nv_mailbox_rcv_msg(adev, event);
if (!r)
return 0;
msleep(10);
now = (uint64_t)ktime_to_ms(ktime_get());
} while (timeout > now);
return -ETIME;
}
static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
int r;
uint8_t trn;
/* IMPORTANT:
 * clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with that cleared
 * the hw automatically clears the host's RCV_MSG_ACK, which in turn leads to
 * the VF's TRN_MSG_ACK being cleared. Otherwise the xgpu_nv_poll_ack() below
 * would return immediately.
 */
do {
xgpu_nv_mailbox_set_valid(adev, false);
trn = xgpu_nv_peek_ack(adev);
if (trn) {
pr_err("trn=%x ACK should not assert! wait again !\n", trn);
msleep(1);
}
} while (trn);
WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
xgpu_nv_mailbox_set_valid(adev, true);
/* start to poll ack */
r = xgpu_nv_poll_ack(adev);
if (r)
pr_err("Doesn't get ack from pf, continue\n");
xgpu_nv_mailbox_set_valid(adev, false);
}
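/*
 * Note: as a rough summary of the sequence implemented above:
 *   1. drop TRN_MSG_VALID and wait until the peeked TRN_MSG_ACK bit clears;
 *   2. write the request plus three data words into MSGBUF_TRN_DW0..DW3;
 *   3. raise TRN_MSG_VALID so the host picks up the message;
 *   4. poll for the host ack, then drop TRN_MSG_VALID again.
 */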
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
int r, retry = 1;
enum idh_event event = -1;
send_request:
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
switch (req) {
case IDH_REQ_GPU_INIT_ACCESS:
case IDH_REQ_GPU_FINI_ACCESS:
case IDH_REQ_GPU_RESET_ACCESS:
event = IDH_READY_TO_ACCESS_GPU;
break;
case IDH_REQ_GPU_INIT_DATA:
event = IDH_REQ_GPU_INIT_DATA_READY;
break;
default:
break;
}
if (event != -1) {
r = xgpu_nv_poll_msg(adev, event);
if (r) {
if (retry++ < 2)
goto send_request;
if (req != IDH_REQ_GPU_INIT_DATA) {
pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return r;
} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
adev->virt.req_init_data_ver = 0;
} else {
if (req == IDH_REQ_GPU_INIT_DATA) {
adev->virt.req_init_data_ver =
RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
/* assume V1 in case host doesn't set version number */
if (adev->virt.req_init_data_ver < 1)
adev->virt.req_init_data_ver = 1;
}
}
/* Retrieve checksum from mailbox2 */
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
}
}
return 0;
}
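/*
 * Note: only the INIT/FINI/RESET access requests and REQ_GPU_INIT_DATA expect
 * a reply event; each of those is retried once on timeout. On a successful
 * INIT or RESET access the fw-reserve checksum is read back from RCV_DW2, and
 * for REQ_GPU_INIT_DATA the init-data version comes from RCV_DW1 (defaulting
 * to 1 when the host does not report one).
 */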
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
int ret, i = 0;
while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
if (!ret)
break;
i++;
}
return ret;
}
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
enum idh_request req;
req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
return xgpu_nv_send_access_requests(adev, req);
}
static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
enum idh_request req;
int r = 0;
req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
r = xgpu_nv_send_access_requests(adev, req);
return r;
}
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("get ack intr and do nothing.\n");
return 0;
}
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
if (state == AMDGPU_IRQ_STATE_ENABLE)
tmp |= 2;
else
tmp &= ~2;
WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
/* block amdgpu_gpu_recover till the FLR COMPLETE msg is received,
 * otherwise the mailbox msg will be ruined/reset by
 * the VF FLR.
 */
if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
return;
down_write(&adev->reset_domain->sem);
amdgpu_virt_fini_data_exchange(adev);
xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
do {
if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
goto flr_done;
msleep(10);
timeout -= 10;
} while (timeout > 1);
flr_done:
atomic_set(&adev->reset_domain->in_gpu_reset, 0);
up_write(&adev->reset_domain->sem);
/* Trigger recovery for world switch failure if no TDR */
if (amdgpu_device_should_recover_gpu(adev)
&& (!amdgpu_device_has_job_running(adev) ||
adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
reset_context.reset_req_dev = adev;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
}
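/*
 * Note: roughly, the FLR worker above serializes with GPU reset by taking the
 * reset-domain semaphore, stops the VF/PF data exchange, tells the host it is
 * ready to reset, and then polls up to NV_MAILBOX_POLL_FLR_TIMEDOUT msec for
 * IDH_FLR_NOTIFICATION_CMPL before optionally kicking off a full recovery.
 */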
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
if (state == AMDGPU_IRQ_STATE_ENABLE)
tmp |= 1;
else
tmp &= ~1;
WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);
switch (event) {
case IDH_FLR_NOTIFICATION:
if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
&adev->virt.flr_work),
"Failed to queue work! at %s",
__func__);
break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ handler can
 * safely ignore it since the polling thread will handle it; other msgs such
 * as flr complete are not handled here either.
 */
case IDH_CLR_MSG_BUF:
case IDH_FLR_NOTIFICATION_CMPL:
case IDH_READY_TO_ACCESS_GPU:
default:
break;
}
return 0;
}
static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
.set = xgpu_nv_set_mailbox_ack_irq,
.process = xgpu_nv_mailbox_ack_irq,
};
static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
.set = xgpu_nv_set_mailbox_rcv_irq,
.process = xgpu_nv_mailbox_rcv_irq,
};
void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
adev->virt.ack_irq.num_types = 1;
adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
adev->virt.rcv_irq.num_types = 1;
adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
int r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
if (r)
return r;
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
}
return 0;
}
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
int r;
r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
if (r) {
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
return r;
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
return 0;
}
void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
{
xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
}
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
.req_init_data = xgpu_nv_request_init_data,
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
.ras_poison_handler = xgpu_nv_ras_poison_handler,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c |
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_common.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
{
/* HW has more IP blocks, only initialize the blocks needed by our driver */
uint32_t i;
for (i = 0 ; i < MAX_INSTANCE ; ++i) {
adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
}
return 0;
}
void vega20_doorbell_index_init(struct amdgpu_device *adev)
{
adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
adev->doorbell_index.sdma_engine[0] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
adev->doorbell_index.sdma_engine[1] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
adev->doorbell_index.sdma_engine[2] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
adev->doorbell_index.sdma_engine[3] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
adev->doorbell_index.sdma_engine[4] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
adev->doorbell_index.sdma_engine[5] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
adev->doorbell_index.sdma_engine[6] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
adev->doorbell_index.sdma_engine[7] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCN0_1;
adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCN2_3;
adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCN4_5;
adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCN6_7;
adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP;
adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP;
adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
adev->doorbell_index.sdma_doorbell_range = 20;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "umc_v8_10.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_8_10_0_offset.h"
#include "umc/umc_8_10_0_sh_mask.h"
#define UMC_8_NODE_DIST 0x800000
#define UMC_8_INST_DIST 0x4000
struct channelnum_map_colbit {
uint32_t channel_num;
uint32_t col_bit;
};
const struct channelnum_map_colbit umc_v8_10_channelnum_map_colbit_table[] = {
{24, 13},
{20, 13},
{16, 12},
{14, 12},
{12, 12},
{10, 12},
{6, 11},
};
const uint32_t
umc_v8_10_channel_idx_tbl_ext0[]
[UMC_V8_10_UMC_INSTANCE_NUM]
[UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
{{1, 5}, {7, 3}},
{{14, 15}, {13, 12}},
{{10, 11}, {9, 8}},
{{6, 2}, {0, 4}}
};
const uint32_t
umc_v8_10_channel_idx_tbl[]
[UMC_V8_10_UMC_INSTANCE_NUM]
[UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
{{16, 18}, {17, 19}},
{{15, 11}, {3, 7}},
{{1, 5}, {13, 9}},
{{23, 21}, {22, 20}},
{{0, 4}, {12, 8}},
{{14, 10}, {2, 6}}
};
static inline uint32_t get_umc_v8_10_reg_offset(struct amdgpu_device *adev,
uint32_t node_inst,
uint32_t umc_inst,
uint32_t ch_inst)
{
return adev->umc.channel_offs * ch_inst + UMC_8_INST_DIST * umc_inst +
UMC_8_NODE_DIST * node_inst;
}
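/*
 * Note: the per-channel register offset is a linear combination of the
 * instance indices. As a purely illustrative example, assuming a hypothetical
 * channel_offs of 0x400, node 1 / umc 1 / ch 1 would give
 * 0x400 * 1 + 0x4000 * 1 + 0x800000 * 1 = 0x804400; callers multiply the sum
 * of this offset and the register offset by 4 to form the byte address used
 * by the RREG/WREG PCIE accessors.
 */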
static int umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
uint32_t ecc_err_cnt_addr;
uint32_t umc_reg_offset =
get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
ecc_err_cnt_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);
/* clear error count */
WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
UMC_V8_10_CE_CNT_INIT);
return 0;
}
static void umc_v8_10_clear_error_count(struct amdgpu_device *adev)
{
amdgpu_umc_loop_channels(adev,
umc_v8_10_clear_error_count_per_channel, NULL);
}
static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
/* UMC 8_10 registers */
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
/* Rely on MCUMC_STATUS for correctable error counter
* MCUMC_STATUS is a 64 bit register
*/
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
*error_count += 1;
}
static void umc_v8_10_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
/* Check the MCUMC_STATUS. */
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
*error_count += 1;
}
static int umc_v8_10_query_ecc_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
struct ras_err_data *err_data = (struct ras_err_data *)data;
uint32_t umc_reg_offset =
get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
umc_v8_10_query_correctable_error_count(adev,
umc_reg_offset,
&(err_data->ce_count));
umc_v8_10_query_uncorrectable_error_count(adev,
umc_reg_offset,
&(err_data->ue_count));
return 0;
}
static void umc_v8_10_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v8_10_query_ecc_error_count, ras_error_status);
umc_v8_10_clear_error_count(adev);
}
static uint32_t umc_v8_10_get_col_bit(uint32_t channel_num)
{
uint32_t t = 0;
for (t = 0; t < ARRAY_SIZE(umc_v8_10_channelnum_map_colbit_table); t++)
if (channel_num == umc_v8_10_channelnum_map_colbit_table[t].channel_num)
return umc_v8_10_channelnum_map_colbit_table[t].col_bit;
/* Failed to get col_bit. */
return U32_MAX;
}
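/*
 * Note: per the umc_v8_10_channelnum_map_colbit_table above, e.g. a
 * 16-channel configuration uses column bit 12 while a 24-channel one uses
 * bit 13; U32_MAX is returned for channel counts the table does not cover.
 */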
/*
* Mapping normal address to soc physical address in swizzle mode.
*/
static int umc_v8_10_swizzle_mode_na_to_pa(struct amdgpu_device *adev,
uint32_t channel_idx,
uint64_t na, uint64_t *soc_pa)
{
uint32_t channel_num = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
uint32_t col_bit = umc_v8_10_get_col_bit(channel_num);
uint64_t tmp_addr;
if (col_bit == U32_MAX)
return -1;
tmp_addr = SWIZZLE_MODE_TMP_ADDR(na, channel_num, channel_idx);
*soc_pa = SWIZZLE_MODE_ADDR_HI(tmp_addr, col_bit) |
SWIZZLE_MODE_ADDR_MID(na, col_bit) |
SWIZZLE_MODE_ADDR_LOW(tmp_addr, col_bit) |
SWIZZLE_MODE_ADDR_LSB(na);
return 0;
}
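/*
 * Note: the SWIZZLE_MODE_* helpers used above (presumably defined in
 * umc_v8_10.h) split the normal address around the channel-count dependent
 * column bit and reassemble the high, middle, low and LSB portions into the
 * soc physical address; this only restates what the code does and does not
 * document the exact bit layout.
 */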
static void umc_v8_10_convert_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data, uint64_t err_addr,
uint32_t ch_inst, uint32_t umc_inst,
uint32_t node_inst, uint64_t mc_umc_status)
{
uint64_t na_err_addr_base;
uint64_t na_err_addr, retired_page_addr;
uint32_t channel_index, addr_lsb, col = 0;
int ret = 0;
channel_index =
adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst];
/* the lowest AddrLsb bits of the error address should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
err_addr &= ~((0x1ULL << addr_lsb) - 1);
na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
/* loop for all possibilities of [C6 C5] in normal address. */
for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
/* Mapping normal error address to retired soc physical address. */
ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
na_err_addr, &retired_page_addr);
if (ret) {
dev_err(adev->dev, "Failed to map pa from umc na.\n");
break;
}
dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
retired_page_addr);
amdgpu_umc_fill_error_record(err_data, na_err_addr,
retired_page_addr, channel_index, umc_inst);
}
}
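/*
 * Note: the loop above retires one page per possible [C6 C5] column
 * combination, presumably because those column bits cannot be recovered from
 * the reported MCA error address; each candidate normal address is translated
 * to a soc physical address and logged as a retired page.
 */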
static int umc_v8_10_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
uint64_t mc_umc_addrt0;
struct ras_err_data *err_data = (struct ras_err_data *)data;
uint32_t umc_reg_offset =
get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
return 0;
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
return 0;
}
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
umc_v8_10_convert_error_address(adev, err_data, err_addr,
ch_inst, umc_inst, node_inst, mc_umc_status);
}
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
return 0;
}
static void umc_v8_10_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v8_10_query_error_address, ras_error_status);
}
static int umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
uint32_t ecc_err_cnt_addr;
uint32_t umc_reg_offset =
get_umc_v8_10_reg_offset(adev, node_inst, umc_inst, ch_inst);
ecc_err_cnt_sel_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCntSel);
ecc_err_cnt_addr =
SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);
ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
/* set ce error interrupt type to APIC based interrupt */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
GeccErrInt, 0x1);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
/* set error count to initial value */
WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_10_CE_CNT_INIT);
return 0;
}
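/*
 * Note: per channel this selects APIC-based reporting for correctable errors
 * (GeccErrInt = 0x1) and resets GeccErrCnt to UMC_V8_10_CE_CNT_INIT so that
 * later reads start from a known initial value.
 */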
static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
{
amdgpu_umc_loop_channels(adev,
umc_v8_10_err_cnt_init_per_channel, NULL);
}
static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
{
/*
* Force return true, because UMCCH0_0_GeccCtrl
* is not accessible from host side
*/
return true;
}
static void umc_v8_10_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst;
/* check the MCUMC_STATUS */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
*error_count += 1;
}
}
static void umc_v8_10_ecc_info_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst;
/* check the MCUMC_STATUS */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
}
}
static int umc_v8_10_ecc_info_query_ecc_error_count(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
struct ras_err_data *err_data = (struct ras_err_data *)data;
umc_v8_10_ecc_info_query_correctable_error_count(adev,
node_inst, umc_inst, ch_inst,
&(err_data->ce_count));
umc_v8_10_ecc_info_query_uncorrectable_error_count(adev,
node_inst, umc_inst, ch_inst,
&(err_data->ue_count));
return 0;
}
static void umc_v8_10_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v8_10_ecc_info_query_ecc_error_count, ras_error_status);
}
static int umc_v8_10_ecc_info_query_error_address(struct amdgpu_device *adev,
uint32_t node_inst, uint32_t umc_inst,
uint32_t ch_inst, void *data)
{
uint32_t eccinfo_table_idx;
uint64_t mc_umc_status, err_addr;
struct ras_err_data *err_data = (struct ras_err_data *)data;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = node_inst * adev->umc.umc_inst_num *
adev->umc.channel_inst_num +
umc_inst * adev->umc.channel_inst_num +
ch_inst;
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
return 0;
if (!err_data->err_addr)
return 0;
/* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1)) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
umc_v8_10_convert_error_address(adev, err_data, err_addr,
ch_inst, umc_inst, node_inst, mc_umc_status);
}
return 0;
}
static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
void *ras_error_status)
{
amdgpu_umc_loop_channels(adev,
umc_v8_10_ecc_info_query_error_address, ras_error_status);
}
static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr)
{
hdr->version = RAS_TABLE_VER_V2_1;
}
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
};
struct amdgpu_umc_ras umc_v8_10_ras = {
.ras_block = {
.hw_ops = &umc_v8_10_ras_hw_ops,
},
.err_cnt_init = umc_v8_10_err_cnt_init,
.query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address,
.set_eeprom_table_version = umc_v8_10_set_eeprom_table_version,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/umc_v8_10.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "athub_v2_0.h"
#include "athub/athub_2_0_0_offset.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_default.h"
#include "soc15_common.h"
static void
athub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
return;
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
if (enable)
data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
if (def != data)
WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}
static void
athub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (!((adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
(adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)))
return;
def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
if (enable)
data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
else
data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
if (def != data)
WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
}
int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state)
{
if (amdgpu_sriov_vf(adev))
return 0;
switch (adev->ip_versions[ATHUB_HWIP][0]) {
case IP_VERSION(1, 3, 1):
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 0, 2):
athub_v2_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_v2_0_update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}
return 0;
}
void athub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data;
/* AMD_CG_SUPPORT_ATHUB_MGCG */
data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
/* AMD_CG_SUPPORT_ATHUB_LS */
if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_ATHUB_LS;
}
| linux-master | drivers/gpu/drm/amd/amdgpu/athub_v2_0.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "soc15.h"
#include "soc15d.h"
#include "gc/gc_9_4_2_offset.h"
#include "gc/gc_9_4_2_sh_mask.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_2.h"
#include "amdgpu_ras.h"
#include "amdgpu_gfx.h"
#define SE_ID_MAX 8
#define CU_ID_MAX 16
#define SIMD_ID_MAX 4
#define WAVE_ID_MAX 10
enum gfx_v9_4_2_utc_type {
VML2_MEM,
VML2_WALKER_MEM,
UTCL2_MEM,
ATC_L2_CACHE_2M,
ATC_L2_CACHE_32K,
ATC_L2_CACHE_4K
};
struct gfx_v9_4_2_utc_block {
enum gfx_v9_4_2_utc_type type;
uint32_t num_banks;
uint32_t num_ways;
uint32_t num_mem_blocks;
struct soc15_reg idx_reg;
struct soc15_reg data_reg;
uint32_t sec_count_mask;
uint32_t sec_count_shift;
uint32_t ded_count_mask;
uint32_t ded_count_shift;
uint32_t clear;
};
static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_0[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x141dc920),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0x3b458b93),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x1a4f5583),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0x317717f6),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x107cc1e6),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x351),
};
static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_1[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x2591aa38),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0xac9e88b),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x2bc3369b),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0xfb74ee),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x21f0a2fe),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x49),
};
static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_UTCL1_CNTL1, 0xffffffff, 0x30800400),
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
};
/*
 * This shader is used to clear VGPRs and LDS, and also to write the input
 * pattern into the write-back buffer, which will be used by the driver to
 * check whether all SIMDs have been covered.
 */
static const u32 vgpr_init_compute_shader_aldebaran[] = {
0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xd3d94000,
0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080, 0xd3d94003,
0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080, 0xd3d94006,
0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080, 0xd3d94009,
0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080, 0xd3d9400c,
0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080, 0xd3d9400f,
0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080, 0xd3d94012,
0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080, 0xd3d94015,
0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080, 0xd3d94018,
0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080, 0xd3d9401b,
0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080, 0xd3d9401e,
0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080, 0xd3d94021,
0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080, 0xd3d94024,
0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080, 0xd3d94027,
0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080, 0xd3d9402a,
0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080, 0xd3d9402d,
0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080, 0xd3d94030,
0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080, 0xd3d94033,
0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080, 0xd3d94036,
0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080, 0xd3d94039,
0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080, 0xd3d9403c,
0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080, 0xd3d9403f,
0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080, 0xd3d94042,
0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080, 0xd3d94045,
0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080, 0xd3d94048,
0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080, 0xd3d9404b,
0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080, 0xd3d9404e,
0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080, 0xd3d94051,
0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080, 0xd3d94054,
0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080, 0xd3d94057,
0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080, 0xd3d9405a,
0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080, 0xd3d9405d,
0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080, 0xd3d94060,
0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080, 0xd3d94063,
0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080, 0xd3d94066,
0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080, 0xd3d94069,
0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080, 0xd3d9406c,
0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080, 0xd3d9406f,
0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080, 0xd3d94072,
0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080, 0xd3d94075,
0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080, 0xd3d94078,
0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080, 0xd3d9407b,
0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080, 0xd3d9407e,
0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080, 0xd3d94081,
0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080, 0xd3d94084,
0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080, 0xd3d94087,
0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080, 0xd3d9408a,
0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080, 0xd3d9408d,
0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080, 0xd3d94090,
0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080, 0xd3d94093,
0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080, 0xd3d94096,
0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080, 0xd3d94099,
0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080, 0xd3d9409c,
0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080, 0xd3d9409f,
0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080, 0xd3d940a2,
0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080, 0xd3d940a5,
0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080, 0xd3d940a8,
0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080, 0xd3d940ab,
0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080, 0xd3d940ae,
0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080, 0xd3d940b1,
0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080, 0xd3d940b4,
0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080, 0xd3d940b7,
0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080, 0xd3d940ba,
0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080, 0xd3d940bd,
0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080, 0xd3d940c0,
0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080, 0xd3d940c3,
0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080, 0xd3d940c6,
0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080, 0xd3d940c9,
0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080, 0xd3d940cc,
0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080, 0xd3d940cf,
0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080, 0xd3d940d2,
0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080, 0xd3d940d5,
0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080, 0xd3d940d8,
0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080, 0xd3d940db,
0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080, 0xd3d940de,
0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080, 0xd3d940e1,
0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080, 0xd3d940e4,
0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080, 0xd3d940e7,
0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080, 0xd3d940ea,
0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080, 0xd3d940ed,
0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080, 0xd3d940f0,
0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080, 0xd3d940f3,
0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080, 0xd3d940f6,
0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080, 0xd3d940f9,
0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080, 0xd3d940fc,
0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080, 0xd3d940ff,
0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a, 0x7e000280,
0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280, 0x7e0c0280,
0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000, 0xd28c0001,
0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xbe8b0004, 0xb78b4000,
0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000, 0x00020201,
0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a, 0xbf84fff8,
0xbf810000,
};
const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 4 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0xbf },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x400006 }, /* 64KB LDS */
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x3F }, /* 63 - accum-offset = 256 */
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
/*
 * The shaders below are used to clear SGPRs, and also to write the input
 * pattern into the write-back buffer. The first two dispatches should be
 * scheduled simultaneously to make sure that all SGPRs can be allocated,
 * so dispatch 1 needs to check the write-back buffer before it is
 * scheduled, to make sure that the waves of dispatch 0 have all been
 * dispatched to all SIMDs in a balanced way. Both dispatch 0 and dispatch 1
 * should be halted until all waves are dispatched, and then the driver
 * writes a pattern to the shared memory to let all waves continue.
 */
static const u32 sgpr112_init_compute_shader_aldebaran[] = {
0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200,
0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102,
0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080,
0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080,
0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080,
0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080,
0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080,
0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080,
0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080,
0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080,
0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080,
0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080,
0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080,
0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080,
0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080,
0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080,
0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080,
0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080,
0xbeda0080, 0xbedb0080, 0xbedc0080, 0xbedd0080, 0xbede0080, 0xbedf0080,
0xbee00080, 0xbee10080, 0xbee20080, 0xbee30080, 0xbee40080, 0xbee50080,
0xbf810000
};
const struct soc15_reg_entry sgpr112_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 8 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x340 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
static const u32 sgpr96_init_compute_shader_aldebaran[] = {
0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200,
0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102,
0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080,
0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080,
0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080,
0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080,
0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080,
0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080,
0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080,
0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080,
0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080,
0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080,
0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080,
0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080,
0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080,
0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080,
0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080,
0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080,
0xbf810000,
};
const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0xc },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x2c0 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
/*
 * This shader is used to clear the uninitialized SGPRs left after the above
 * two dispatches; because of a hardware feature, dispatch 0 cannot clear the
 * top hole SGPRs. Therefore 4 waves per SIMD are needed to cover these SGPRs.
 */
static const u32 sgpr64_init_compute_shader_aldebaran[] = {
0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280,
0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208,
0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200,
0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102,
0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080,
0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080,
0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080,
0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080,
0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080,
0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080,
0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080,
0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080,
0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080,
0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080,
0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbf810000,
};
const struct soc15_reg_entry sgpr64_init_regs_aldebaran[] = {
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0x10 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x1c0 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
{ SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
const u32 *shader_ptr, u32 shader_size,
const struct soc15_reg_entry *init_regs, u32 regs_size,
u32 compute_dim_x, u64 wb_gpu_addr, u32 pattern,
struct dma_fence **fence_ptr)
{
int r, i;
uint32_t total_size, shader_offset;
u64 gpu_addr;
total_size = (regs_size * 3 + 4 + 5 + 5) * 4;
total_size = ALIGN(total_size, 256);
shader_offset = total_size;
total_size += ALIGN(shader_size, 256);
/* allocate an indirect buffer to put the commands in */
memset(ib, 0, sizeof(*ib));
r = amdgpu_ib_get(adev, NULL, total_size,
AMDGPU_IB_POOL_DIRECT, ib);
if (r) {
dev_err(adev->dev, "failed to get ib (%d).\n", r);
return r;
}
/* load the compute shaders */
for (i = 0; i < shader_size/sizeof(u32); i++)
ib->ptr[i + (shader_offset / 4)] = shader_ptr[i];
/* init the ib length to 0 */
ib->length_dw = 0;
/* write the register state for the compute dispatch */
for (i = 0; i < regs_size; i++) {
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
ib->ptr[ib->length_dw++] = SOC15_REG_ENTRY_OFFSET(init_regs[i])
- PACKET3_SET_SH_REG_START;
ib->ptr[ib->length_dw++] = init_regs[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib->gpu_addr + (u64)shader_offset) >> 8;
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_PGM_LO)
- PACKET3_SET_SH_REG_START;
ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr);
ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr);
/* write the wb buffer address */
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 3);
ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_USER_DATA_0)
- PACKET3_SET_SH_REG_START;
ib->ptr[ib->length_dw++] = lower_32_bits(wb_gpu_addr);
ib->ptr[ib->length_dw++] = upper_32_bits(wb_gpu_addr);
ib->ptr[ib->length_dw++] = pattern;
/* write dispatch packet */
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
ib->ptr[ib->length_dw++] = compute_dim_x; /* x */
ib->ptr[ib->length_dw++] = 1; /* y */
ib->ptr[ib->length_dw++] = 1; /* z */
ib->ptr[ib->length_dw++] =
REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
/* schedule the ib on the ring */
r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr);
if (r) {
dev_err(adev->dev, "ib submit failed (%d).\n", r);
amdgpu_ib_free(adev, ib, NULL);
}
return r;
}
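/*
 * Note: the IB built above is laid out as a packet stream followed by the
 * shader itself at a 256-byte aligned offset: SET_SH_REG writes for the
 * init_regs table, COMPUTE_PGM_LO/HI pointing at the embedded shader,
 * COMPUTE_USER_DATA_0..2 carrying the write-back address and pattern, and a
 * single DISPATCH_DIRECT of compute_dim_x x 1 x 1 thread groups.
 */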
static void gfx_v9_4_2_log_wave_assignment(struct amdgpu_device *adev, uint32_t *wb_ptr)
{
uint32_t se, cu, simd, wave;
uint32_t offset = 0;
char *str;
int size;
str = kmalloc(256, GFP_KERNEL);
if (!str)
return;
dev_dbg(adev->dev, "wave assignment:\n");
for (se = 0; se < adev->gfx.config.max_shader_engines; se++) {
for (cu = 0; cu < CU_ID_MAX; cu++) {
memset(str, 0, 256);
size = sprintf(str, "SE[%02d]CU[%02d]: ", se, cu);
for (simd = 0; simd < SIMD_ID_MAX; simd++) {
size += sprintf(str + size, "[");
for (wave = 0; wave < WAVE_ID_MAX; wave++) {
size += sprintf(str + size, "%x", wb_ptr[offset]);
offset++;
}
size += sprintf(str + size, "] ");
}
dev_dbg(adev->dev, "%s\n", str);
}
}
kfree(str);
}
static int gfx_v9_4_2_wait_for_waves_assigned(struct amdgpu_device *adev,
uint32_t *wb_ptr, uint32_t mask,
uint32_t pattern, uint32_t num_wave, bool wait)
{
uint32_t se, cu, simd, wave;
uint32_t loop = 0;
uint32_t wave_cnt;
uint32_t offset;
do {
wave_cnt = 0;
offset = 0;
for (se = 0; se < adev->gfx.config.max_shader_engines; se++)
for (cu = 0; cu < CU_ID_MAX; cu++)
for (simd = 0; simd < SIMD_ID_MAX; simd++)
for (wave = 0; wave < WAVE_ID_MAX; wave++) {
if (((1 << wave) & mask) &&
(wb_ptr[offset] == pattern))
wave_cnt++;
offset++;
}
if (wave_cnt == num_wave)
return 0;
mdelay(1);
} while (++loop < 2000 && wait);
dev_err(adev->dev, "actual wave num: %d, expected wave num: %d\n",
wave_cnt, num_wave);
gfx_v9_4_2_log_wave_assignment(adev, wb_ptr);
return -EBADSLT;
}
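/*
 * Note: the write-back buffer is treated as one dword per (se, cu, simd,
 * wave) slot, iterated in exactly that nesting order; 'mask' selects which
 * wave IDs are expected to have written 'pattern', and the check is retried
 * for roughly two seconds before the wave assignment is dumped and -EBADSLT
 * returned.
 */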
static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev)
{
int r;
int wb_size = adev->gfx.config.max_shader_engines *
CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX;
struct amdgpu_ib wb_ib;
struct amdgpu_ib disp_ibs[3];
struct dma_fence *fences[3];
u32 pattern[3] = { 0x1, 0x5, 0xa };
/* bail if the compute ring is not ready */
if (!adev->gfx.compute_ring[0].sched.ready ||
!adev->gfx.compute_ring[1].sched.ready)
return 0;
/* allocate the write-back buffer from IB */
memset(&wb_ib, 0, sizeof(wb_ib));
r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * sizeof(uint32_t),
AMDGPU_IB_POOL_DIRECT, &wb_ib);
if (r) {
dev_err(adev->dev, "failed to get ib (%d) for wb\n", r);
return r;
}
memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t));
r = gfx_v9_4_2_run_shader(adev,
&adev->gfx.compute_ring[0],
&disp_ibs[0],
sgpr112_init_compute_shader_aldebaran,
sizeof(sgpr112_init_compute_shader_aldebaran),
sgpr112_init_regs_aldebaran,
ARRAY_SIZE(sgpr112_init_regs_aldebaran),
adev->gfx.cu_info.number,
wb_ib.gpu_addr, pattern[0], &fences[0]);
if (r) {
dev_err(adev->dev, "failed to clear first 224 sgprs\n");
goto pro_end;
}
r = gfx_v9_4_2_wait_for_waves_assigned(adev,
&wb_ib.ptr[1], 0b11,
pattern[0],
adev->gfx.cu_info.number * SIMD_ID_MAX * 2,
true);
if (r) {
dev_err(adev->dev, "wave coverage failed when clear first 224 sgprs\n");
wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
goto disp0_failed;
}
r = gfx_v9_4_2_run_shader(adev,
&adev->gfx.compute_ring[1],
&disp_ibs[1],
sgpr96_init_compute_shader_aldebaran,
sizeof(sgpr96_init_compute_shader_aldebaran),
sgpr96_init_regs_aldebaran,
ARRAY_SIZE(sgpr96_init_regs_aldebaran),
adev->gfx.cu_info.number * 2,
wb_ib.gpu_addr, pattern[1], &fences[1]);
if (r) {
dev_err(adev->dev, "failed to clear next 576 sgprs\n");
goto disp0_failed;
}
r = gfx_v9_4_2_wait_for_waves_assigned(adev,
&wb_ib.ptr[1], 0b11111100,
pattern[1], adev->gfx.cu_info.number * SIMD_ID_MAX * 6,
true);
if (r) {
dev_err(adev->dev, "wave coverage failed when clear first 576 sgprs\n");
wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
goto disp1_failed;
}
wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
/* wait for the GPU to finish processing the IB */
r = dma_fence_wait(fences[0], false);
if (r) {
dev_err(adev->dev, "timeout to clear first 224 sgprs\n");
goto disp1_failed;
}
r = dma_fence_wait(fences[1], false);
if (r) {
dev_err(adev->dev, "timeout to clear first 576 sgprs\n");
goto disp1_failed;
}
memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t));
r = gfx_v9_4_2_run_shader(adev,
&adev->gfx.compute_ring[0],
&disp_ibs[2],
sgpr64_init_compute_shader_aldebaran,
sizeof(sgpr64_init_compute_shader_aldebaran),
sgpr64_init_regs_aldebaran,
ARRAY_SIZE(sgpr64_init_regs_aldebaran),
adev->gfx.cu_info.number,
wb_ib.gpu_addr, pattern[2], &fences[2]);
if (r) {
dev_err(adev->dev, "failed to clear first 256 sgprs\n");
goto disp1_failed;
}
r = gfx_v9_4_2_wait_for_waves_assigned(adev,
&wb_ib.ptr[1], 0b1111,
pattern[2],
adev->gfx.cu_info.number * SIMD_ID_MAX * 4,
true);
if (r) {
dev_err(adev->dev, "wave coverage failed when clear first 256 sgprs\n");
wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
goto disp2_failed;
}
wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */
r = dma_fence_wait(fences[2], false);
if (r) {
dev_err(adev->dev, "timeout to clear first 256 sgprs\n");
goto disp2_failed;
}
disp2_failed:
amdgpu_ib_free(adev, &disp_ibs[2], NULL);
dma_fence_put(fences[2]);
disp1_failed:
amdgpu_ib_free(adev, &disp_ibs[1], NULL);
dma_fence_put(fences[1]);
disp0_failed:
amdgpu_ib_free(adev, &disp_ibs[0], NULL);
dma_fence_put(fences[0]);
pro_end:
amdgpu_ib_free(adev, &wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init SGPRS Failed\n");
else
dev_info(adev->dev, "Init SGPRS Successfully\n");
return r;
}
static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev)
{
int r;
/* CU_ID: 0~15, SIMD_ID: 0~3, WAVE_ID: 0 ~ 9 */
int wb_size = adev->gfx.config.max_shader_engines *
CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX;
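	/*
	 * Worked example (assuming a part with 8 shader engines): the
	 * write-back area spans 8 * 16 * 4 * 10 = 5120 dwords, one per
	 * possible wave slot, plus one extra dword reserved at index 0.
	 */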
struct amdgpu_ib wb_ib;
struct amdgpu_ib disp_ib;
struct dma_fence *fence;
u32 pattern = 0xa;
/* bail if the compute ring is not ready */
if (!adev->gfx.compute_ring[0].sched.ready)
return 0;
/* allocate the write-back buffer from IB */
memset(&wb_ib, 0, sizeof(wb_ib));
r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * sizeof(uint32_t),
AMDGPU_IB_POOL_DIRECT, &wb_ib);
if (r) {
dev_err(adev->dev, "failed to get ib (%d) for wb.\n", r);
return r;
}
memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t));
r = gfx_v9_4_2_run_shader(adev,
&adev->gfx.compute_ring[0],
&disp_ib,
vgpr_init_compute_shader_aldebaran,
sizeof(vgpr_init_compute_shader_aldebaran),
vgpr_init_regs_aldebaran,
ARRAY_SIZE(vgpr_init_regs_aldebaran),
adev->gfx.cu_info.number,
wb_ib.gpu_addr, pattern, &fence);
if (r) {
dev_err(adev->dev, "failed to clear vgprs\n");
goto pro_end;
}
/* wait for the GPU to finish processing the IB */
r = dma_fence_wait(fence, false);
if (r) {
dev_err(adev->dev, "timeout to clear vgprs\n");
goto disp_failed;
}
r = gfx_v9_4_2_wait_for_waves_assigned(adev,
&wb_ib.ptr[1], 0b1,
pattern,
adev->gfx.cu_info.number * SIMD_ID_MAX,
false);
if (r) {
dev_err(adev->dev, "failed to cover all simds when clearing vgprs\n");
goto disp_failed;
}
disp_failed:
amdgpu_ib_free(adev, &disp_ib, NULL);
dma_fence_put(fence);
pro_end:
amdgpu_ib_free(adev, &wb_ib, NULL);
if (r)
dev_info(adev->dev, "Init VGPRS Failed\n");
else
dev_info(adev->dev, "Init VGPRS Successfully\n");
return r;
}
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
	/*
	 * Workaround for ALDEBARAN, skip GPRs init in GPU reset.
	 * Will remove it once GPRs init algorithm works for all CU settings.
	 */
if (amdgpu_in_reset(adev))
return 0;
gfx_v9_4_2_do_sgprs_init(adev);
gfx_v9_4_2_do_vgprs_init(adev);
return 0;
}
static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev);
static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev);
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id)
{
soc15_program_register_sequence(adev,
golden_settings_gc_9_4_2_alde,
ARRAY_SIZE(golden_settings_gc_9_4_2_alde));
/* apply golden settings per die */
switch (die_id) {
case 0:
soc15_program_register_sequence(adev,
golden_settings_gc_9_4_2_alde_die_0,
ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_0));
break;
case 1:
soc15_program_register_sequence(adev,
golden_settings_gc_9_4_2_alde_die_1,
ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_1));
break;
default:
dev_warn(adev->dev,
"invalid die id %d, ignore channel fabricid remap settings\n",
die_id);
break;
}
return;
}
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
{
uint32_t data;
int i;
mutex_lock(&adev->srbm_mutex);
for (i = first_vmid; i < last_vmid; i++) {
data = 0;
soc15_grbm_select(adev, 0, 0, 0, i, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
0);
WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
}
soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA0), 0);
WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_TRAP_DATA1), 0);
}
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
{
u32 tmp;
gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
tmp = 0;
tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1);
WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL, tmp);
tmp = 0;
tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL1, PWRBRK_STALL_EN, 1);
WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL1, tmp);
WREG32_SOC15(GC, 0, regGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
tmp = 0;
tmp = REG_SET_FIELD(tmp, PWRBRK_STALL_PATTERN_CTRL, PWRBRK_END_STEP, 0x12);
WREG32_SOC15(GC, 0, regGC_CAC_IND_DATA, tmp);
}
static const struct soc15_reg_entry gfx_v9_4_2_edc_counter_regs[] = {
/* CPF */
{ SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT), 0, 1, 1 },
/* CPC */
{ SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT), 0, 1, 1 },
/* GDS */
{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
/* RLC */
{ SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), 0, 1, 1 },
{ SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), 0, 1, 1 },
/* SPI */
{ SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), 0, 8, 1 },
/* SQC */
{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), 0, 8, 7 },
{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), 0, 8, 7 },
{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), 0, 8, 7 },
{ SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), 0, 8, 7 },
/* SQ */
{ SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), 0, 8, 14 },
/* TCP */
{ SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), 0, 8, 14 },
/* TCI */
{ SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT), 0, 1, 69 },
/* TCC */
{ SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), 0, 1, 16 },
{ SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), 0, 1, 16 },
/* TCA */
{ SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT), 0, 1, 2 },
/* TCX */
{ SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), 0, 1, 2 },
{ SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), 0, 1, 2 },
/* TD */
{ SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), 0, 8, 14 },
/* TA */
{ SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), 0, 8, 14 },
/* GCEA */
{ SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), 0, 1, 16 },
{ SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), 0, 1, 16 },
{ SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 1, 16 },
};
static void gfx_v9_4_2_select_se_sh(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 instance)
{
u32 data;
if (instance == 0xffffffff)
data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
INSTANCE_BROADCAST_WRITES, 1);
else
data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
instance);
if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
if (sh_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
1);
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data);
}
static const struct soc15_ras_field_entry gfx_v9_4_2_ras_fields[] = {
/* CPF */
{ "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
{ "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
{ "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT),
SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
/* CPC */
{ "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
{ "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT),
SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
{ "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT),
SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
{ "CPC_DC_CSINVOC_RAM_ME1",
SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
{ "CPC_DC_RESTORE_RAM_ME1",
SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
{ "CPC_DC_CSINVOC_RAM1_ME1",
SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
{ "CPC_DC_RESTORE_RAM1_ME1",
SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
/* GDS */
{ "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT),
SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
{ "GDS_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT),
SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
{ "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
{ "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
{ "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE0_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE1_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE2_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
{ "GDS_ME1_PIPE3_PIPE_MEM",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
{ "GDS_ME0_GFXHP3D_PIX_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_PIX_DED) },
{ "GDS_ME0_GFXHP3D_VTX_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_VTX_DED) },
{ "GDS_ME0_CS_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_CS_DED) },
{ "GDS_ME0_GFXHP3D_GS_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_GS_DED) },
{ "GDS_ME1_PIPE0_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE0_DED) },
{ "GDS_ME1_PIPE1_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE1_DED) },
{ "GDS_ME1_PIPE2_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE2_DED) },
{ "GDS_ME1_PIPE3_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE3_DED) },
{ "GDS_ME2_PIPE0_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE0_DED) },
{ "GDS_ME2_PIPE1_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE1_DED) },
{ "GDS_ME2_PIPE2_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE2_DED) },
{ "GDS_ME2_PIPE3_DED",
SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE3_DED) },
/* RLC */
{ "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
{ "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
{ "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
{ "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
{ "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
{ "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
{ "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
{ "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
{ "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
/* SPI */
{ "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
{ "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
{ "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
{ "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
/* SQC - regSQC_EDC_CNT */
{ "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
{ "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
{ "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
{ "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_DATA_CU3_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_DED_COUNT) },
{ "SQC_DATA_CU3_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_DED_COUNT) },
/* SQC - regSQC_EDC_CNT2 */
{ "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
{ "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
{ "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
{ "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
{ "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
{ "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_DED_COUNT) },
/* SQC - regSQC_EDC_CNT3 */
{ "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
{ "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
{ "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
{ "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
{ "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_DED_COUNT) },
/* SQC - regSQC_EDC_PARITY_CNT3 */
{ "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
{ "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_DED_COUNT) },
{ "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
{ "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
{ "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_DED_COUNT) },
/* SQ */
{ "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
{ "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
{ "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
{ "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
{ "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
{ "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
{ "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
/* TCP */
{ "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
{ "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
{ "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
{ "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
{ "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_DED_COUNT) },
{ "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
{ "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
/* TCI */
{ "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT),
SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
/* TCC */
{ "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
{ "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
{ "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
{ "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
{ "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
{ "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
{ "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
{ "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
{ "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
{ "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
{ "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
{ "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
{ "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
{ "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
{ "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
/* TCA */
{ "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
{ "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
/* TCX */
{ "TCX_GROUP0", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_SEC_COUNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_DED_COUNT) },
{ "TCX_GROUP1", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_SEC_COUNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_DED_COUNT) },
{ "TCX_GROUP2", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_SEC_COUNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_DED_COUNT) },
{ "TCX_GROUP3", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_SEC_COUNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_DED_COUNT) },
{ "TCX_GROUP4", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_SEC_COUNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_DED_COUNT) },
{ "TCX_GROUP5", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP5_SED_COUNT), 0, 0 },
{ "TCX_GROUP6", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP6_SED_COUNT), 0, 0 },
{ "TCX_GROUP7", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP7_SED_COUNT), 0, 0 },
{ "TCX_GROUP8", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP8_SED_COUNT), 0, 0 },
{ "TCX_GROUP9", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP9_SED_COUNT), 0, 0 },
{ "TCX_GROUP10", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
SOC15_REG_FIELD(TCX_EDC_CNT, GROUP10_SED_COUNT), 0, 0 },
{ "TCX_GROUP11", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP11_SED_COUNT), 0, 0 },
{ "TCX_GROUP12", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP12_SED_COUNT), 0, 0 },
{ "TCX_GROUP13", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP13_SED_COUNT), 0, 0 },
{ "TCX_GROUP14", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP14_SED_COUNT), 0, 0 },
/* TD */
{ "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
{ "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
{ "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
/* TA */
{ "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
{ "TA_FS_AFIFO_LO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_DED_COUNT) },
{ "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
{ "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
{ "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
{ "TA_FS_AFIFO_HI", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_SEC_COUNT),
SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_DED_COUNT) },
/* EA - regGCEA_EDC_CNT */
{ "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
{ "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
{ "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
{ "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
{ "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
{ "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_DED_COUNT) },
{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
/* EA - regGCEA_EDC_CNT2 */
{ "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
{ "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
{ "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
{ "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
{ "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
{ "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
{ "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
/* EA - regGCEA_EDC_CNT3 */
{ "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
{ "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
{ "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
{ "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
{ "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
{ "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
{ "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
{ "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
{ "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
{ "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
{ "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_SEC_COUNT),
SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
};
static const char * const vml2_walker_mems[] = {
"UTC_VML2_CACHE_PDE0_MEM0",
"UTC_VML2_CACHE_PDE0_MEM1",
"UTC_VML2_CACHE_PDE1_MEM0",
"UTC_VML2_CACHE_PDE1_MEM1",
"UTC_VML2_CACHE_PDE2_MEM0",
"UTC_VML2_CACHE_PDE2_MEM1",
"UTC_VML2_RDIF_ARADDRS",
"UTC_VML2_RDIF_LOG_FIFO",
"UTC_VML2_QUEUE_REQ",
"UTC_VML2_QUEUE_RET",
};
static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = {
{ VML2_MEM, 8, 2, 2,
{ SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_INDEX) },
{ SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_CNTL) },
SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, SEC_COUNT),
SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, DED_COUNT),
REG_SET_FIELD(0, VML2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
{ VML2_WALKER_MEM, ARRAY_SIZE(vml2_walker_mems), 1, 1,
{ SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_INDEX) },
{ SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_CNTL) },
SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, SEC_COUNT),
SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, DED_COUNT),
REG_SET_FIELD(0, VML2_WALKER_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
{ UTCL2_MEM, 18, 1, 2,
{ SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_INDEX) },
{ SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_CNTL) },
SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, SEC_COUNT),
SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, DED_COUNT),
REG_SET_FIELD(0, UTCL2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
{ ATC_L2_CACHE_2M, 8, 2, 1,
{ SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_INDEX) },
{ SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_CNTL) },
SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, SEC_COUNT),
SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, DED_COUNT),
REG_SET_FIELD(0, ATC_L2_CACHE_2M_DSM_CNTL, WRITE_COUNTERS, 1) },
{ ATC_L2_CACHE_32K, 8, 2, 2,
{ SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_INDEX) },
{ SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_CNTL) },
SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, SEC_COUNT),
SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, DED_COUNT),
REG_SET_FIELD(0, ATC_L2_CACHE_32K_DSM_CNTL, WRITE_COUNTERS, 1) },
{ ATC_L2_CACHE_4K, 8, 2, 8,
{ SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_INDEX) },
{ SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_CNTL) },
SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, SEC_COUNT),
SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, DED_COUNT),
REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) },
};
static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs = {
SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16
};
static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id,
uint32_t value, uint32_t *sec_count,
uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_ras_fields); i++) {
if (gfx_v9_4_2_ras_fields[i].reg_offset != reg->reg_offset ||
gfx_v9_4_2_ras_fields[i].seg != reg->seg ||
gfx_v9_4_2_ras_fields[i].inst != reg->inst)
continue;
sec_cnt = SOC15_RAS_REG_FIELD_VAL(
value, gfx_v9_4_2_ras_fields[i], sec);
if (sec_cnt) {
dev_info(adev->dev,
"GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
}
ded_cnt = SOC15_RAS_REG_FIELD_VAL(
value, gfx_v9_4_2_ras_fields[i], ded);
if (ded_cnt) {
dev_info(adev->dev,
"GFX SubBlock %s, Instance[%d][%d], DED %d\n",
gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
}
}
return 0;
}
static int gfx_v9_4_2_query_sram_edc_count(struct amdgpu_device *adev,
uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i, j, k, data;
uint32_t sec_cnt = 0, ded_cnt = 0;
if (sec_count && ded_count) {
*sec_count = 0;
*ded_count = 0;
}
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_4_2_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_4_2_edc_counter_regs[i].instance;
k++) {
gfx_v9_4_2_select_se_sh(adev, j, 0, k);
/* if sec/ded_count is null, just clear counter */
if (!sec_count || !ded_count) {
WREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_edc_counter_regs[i]), 0);
continue;
}
data = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_edc_counter_regs[i]));
if (!data)
continue;
gfx_v9_4_2_get_reg_error_count(adev,
&gfx_v9_4_2_edc_counter_regs[i],
j, k, data, &sec_cnt, &ded_cnt);
/* clear counter after read */
WREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_edc_counter_regs[i]), 0);
}
}
}
if (sec_count && ded_count) {
*sec_count += sec_cnt;
*ded_count += ded_cnt;
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
}
static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
struct gfx_v9_4_2_utc_block *blk,
uint32_t instance, uint32_t sec_cnt,
uint32_t ded_cnt)
{
uint32_t bank, way, mem;
static const char *vml2_way_str[] = { "BIGK", "4K" };
	static const char *utcl2_router_str[] = { "VMC", "APT" };
mem = instance % blk->num_mem_blocks;
way = (instance / blk->num_mem_blocks) % blk->num_ways;
bank = instance / (blk->num_mem_blocks * blk->num_ways);
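	/*
	 * Example decode (illustrative): ATC_L2_CACHE_4K is declared with
	 * 8 banks, 2 ways and 8 mem blocks, so instance 27 maps to
	 * mem = 27 % 8 = 3, way = (27 / 8) % 2 = 1, bank = 27 / 16 = 1.
	 */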
switch (blk->type) {
case VML2_MEM:
dev_info(
adev->dev,
"GFX SubBlock UTC_VML2_BANK_CACHE_%d_%s_MEM%d, SED %d, DED %d\n",
bank, vml2_way_str[way], mem, sec_cnt, ded_cnt);
break;
case VML2_WALKER_MEM:
dev_info(adev->dev, "GFX SubBlock %s, SED %d, DED %d\n",
vml2_walker_mems[bank], sec_cnt, ded_cnt);
break;
case UTCL2_MEM:
dev_info(
adev->dev,
"GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
			bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_2M:
dev_info(
adev->dev,
"GFX SubBlock UTC_ATCL2_CACHE_2M_BANK%d_WAY%d_MEM, SED %d, DED %d\n",
bank, way, sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_32K:
dev_info(
adev->dev,
"GFX SubBlock UTC_ATCL2_CACHE_32K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
bank, way, mem, sec_cnt, ded_cnt);
break;
case ATC_L2_CACHE_4K:
dev_info(
adev->dev,
"GFX SubBlock UTC_ATCL2_CACHE_4K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
bank, way, mem, sec_cnt, ded_cnt);
break;
}
}
static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
uint32_t *sec_count,
uint32_t *ded_count)
{
uint32_t i, j, data;
uint32_t sec_cnt, ded_cnt;
uint32_t num_instances;
struct gfx_v9_4_2_utc_block *blk;
if (sec_count && ded_count) {
*sec_count = 0;
*ded_count = 0;
}
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_utc_blocks); i++) {
blk = &gfx_v9_4_2_utc_blocks[i];
num_instances =
blk->num_banks * blk->num_ways * blk->num_mem_blocks;
for (j = 0; j < num_instances; j++) {
WREG32(SOC15_REG_ENTRY_OFFSET(blk->idx_reg), j);
/* if sec/ded_count is NULL, just clear counter */
if (!sec_count || !ded_count) {
WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
blk->clear);
continue;
}
data = RREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg));
if (!data)
continue;
sec_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, sec);
*sec_count += sec_cnt;
ded_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, ded);
*ded_count += ded_cnt;
/* clear counter after read */
WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
blk->clear);
/* print the edc count */
if (sec_cnt || ded_cnt)
gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
ded_cnt);
}
}
return 0;
}
static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
err_data->ue_count = 0;
err_data->ce_count = 0;
gfx_v9_4_2_query_sram_edc_count(adev, &sec_count, &ded_count);
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
gfx_v9_4_2_query_utc_edc_count(adev, &sec_count, &ded_count);
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
}
static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
{
WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
}
static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
{
uint32_t i, j;
uint32_t value;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_ea_err_status_regs));
value = REG_SET_FIELD(value, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), value);
}
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
gfx_v9_4_2_query_sram_edc_count(adev, NULL, NULL);
gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
}
static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
{
uint32_t i, j;
uint32_t reg_value;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_2_ea_err_status_regs));
if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
}
/* clear after read */
reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
CLEAR_ERROR_STATUS, 0x1);
WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), reg_value);
}
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
{
uint32_t data;
data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS);
if (data) {
dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS);
if (data) {
dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS);
if (data) {
dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
}
}
static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
gfx_v9_4_2_query_ea_err_status(adev);
gfx_v9_4_2_query_utc_err_status(adev);
gfx_v9_4_2_query_sq_timeout_status(adev);
}
static void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
{
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return;
gfx_v9_4_2_reset_utc_err_status(adev);
gfx_v9_4_2_reset_ea_err_status(adev);
gfx_v9_4_2_reset_sq_timeout_status(adev);
}
static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
{
uint32_t i;
uint32_t data;
data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
amdgpu_watchdog_timer.timeout_fatal_disable ? 1 :
0);
if (amdgpu_watchdog_timer.timeout_fatal_disable &&
(amdgpu_watchdog_timer.period < 1 ||
amdgpu_watchdog_timer.period > 0x23)) {
dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
amdgpu_watchdog_timer.period = 0x23;
}
data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
amdgpu_watchdog_timer.period);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
gfx_v9_4_2_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
WREG32_SOC15(GC, 0, regSQ_TIMEOUT_CONFIG, data);
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
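	/*
	 * Indirect wave register read: select the SIMD, wave and register
	 * index via SQ_IND_INDEX, then the value is returned through
	 * SQ_IND_DATA.
	 */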
WREG32_SOC15_RLC_EX(reg, GC, 0, regSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
(SQ_IND_INDEX__FORCE_READ_MASK));
return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}
static void gfx_v9_4_2_log_cu_timeout_status(struct amdgpu_device *adev,
uint32_t status)
{
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
uint32_t i, simd, wave;
uint32_t wave_status;
uint32_t wave_pc_lo, wave_pc_hi;
uint32_t wave_exec_lo, wave_exec_hi;
uint32_t wave_inst_dw0, wave_inst_dw1;
uint32_t wave_ib_sts;
for (i = 0; i < 32; i++) {
		if (!((1 << i) & status))
continue;
simd = i / cu_info->max_waves_per_simd;
wave = i % cu_info->max_waves_per_simd;
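		/*
		 * Illustrative decode (assuming 10 waves per SIMD, as noted
		 * earlier in this file): a set bit at position 23 reports
		 * SIMD 23 / 10 = 2, wave 23 % 10 = 3 as timed out.
		 */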
wave_status = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
wave_pc_lo = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
wave_pc_hi = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
wave_exec_lo =
wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
wave_exec_hi =
wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
wave_inst_dw0 =
wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
wave_inst_dw1 =
wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
wave_ib_sts = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
dev_info(
adev->dev,
"\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
simd, wave, wave_status,
((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
wave_ib_sts);
}
}
static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev)
{
uint32_t se_idx, sh_idx, cu_idx;
uint32_t status;
mutex_lock(&adev->grbm_idx_mutex);
for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
se_idx++) {
for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
sh_idx++) {
for (cu_idx = 0;
cu_idx < adev->gfx.config.max_cu_per_sh;
cu_idx++) {
gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
cu_idx);
status = RREG32_SOC15(GC, 0,
regSQ_TIMEOUT_STATUS);
if (status != 0) {
dev_info(
adev->dev,
"GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
se_idx, sh_idx, cu_idx);
gfx_v9_4_2_log_cu_timeout_status(
adev, status);
}
/* clear old status */
WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
}
}
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
{
uint32_t se_idx, sh_idx, cu_idx;
mutex_lock(&adev->grbm_idx_mutex);
for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
se_idx++) {
for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
sh_idx++) {
for (cu_idx = 0;
cu_idx < adev->gfx.config.max_cu_per_sh;
cu_idx++) {
gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
cu_idx);
WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
}
}
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
static bool gfx_v9_4_2_query_utcl2_poison_status(struct amdgpu_device *adev)
{
u32 status = 0;
struct amdgpu_vmhub *hub;
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
status = RREG32(hub->vm_l2_pro_fault_status);
/* reset page fault status */
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
}
struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
.query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
.reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
};
struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
.ras_block = {
.hw_ops = &gfx_v9_4_2_ras_ops,
},
.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
	.query_utcl2_poison_status = gfx_v9_4_2_query_utcl2_poison_status,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c |
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_psp_ta.h"
#if defined(CONFIG_DEBUG_FS)
static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf,
size_t len, loff_t *off);
static ssize_t ta_if_unload_debugfs_write(struct file *fp, const char *buf,
size_t len, loff_t *off);
static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf,
size_t len, loff_t *off);
static uint32_t get_bin_version(const uint8_t *bin)
{
const struct common_firmware_header *hdr =
(const struct common_firmware_header *)bin;
return hdr->ucode_version;
}
static int prep_ta_mem_context(struct ta_mem_context *mem_context,
uint8_t *shared_buf,
uint32_t shared_buf_len)
{
if (mem_context->shared_mem_size < shared_buf_len)
return -EINVAL;
memset(mem_context->shared_buf, 0, mem_context->shared_mem_size);
memcpy((void *)mem_context->shared_buf, shared_buf, shared_buf_len);
return 0;
}
static bool is_ta_type_valid(enum ta_type_id ta_type)
{
switch (ta_type) {
case TA_TYPE_RAS:
return true;
default:
return false;
}
}
static const struct ta_funcs ras_ta_funcs = {
.fn_ta_initialize = psp_ras_initialize,
.fn_ta_invoke = psp_ras_invoke,
.fn_ta_terminate = psp_ras_terminate
};
static void set_ta_context_funcs(struct psp_context *psp,
enum ta_type_id ta_type,
struct ta_context **pcontext)
{
switch (ta_type) {
case TA_TYPE_RAS:
*pcontext = &psp->ras_context.context;
psp->ta_funcs = &ras_ta_funcs;
break;
default:
break;
}
}
static const struct file_operations ta_load_debugfs_fops = {
.write = ta_if_load_debugfs_write,
.llseek = default_llseek,
.owner = THIS_MODULE
};
static const struct file_operations ta_unload_debugfs_fops = {
.write = ta_if_unload_debugfs_write,
.llseek = default_llseek,
.owner = THIS_MODULE
};
static const struct file_operations ta_invoke_debugfs_fops = {
.write = ta_if_invoke_debugfs_write,
.llseek = default_llseek,
.owner = THIS_MODULE
};
/*
* DOC: AMDGPU TA debugfs interfaces
*
* Three debugfs interfaces can be opened by a program to
* load/invoke/unload TA,
*
* - /sys/kernel/debug/dri/<N>/ta_if/ta_load
* - /sys/kernel/debug/dri/<N>/ta_if/ta_invoke
* - /sys/kernel/debug/dri/<N>/ta_if/ta_unload
*
* How to use the interfaces in a program?
*
 * A program needs to provide a transmit buffer to the interfaces
 * and will receive a buffer back from the interfaces, as laid out below,
*
* - For TA load debugfs interface:
* Transmit buffer:
* - TA type (4bytes)
* - TA bin length (4bytes)
* - TA bin
* Receive buffer:
* - TA ID (4bytes)
*
* - For TA invoke debugfs interface:
* Transmit buffer:
* - TA type (4bytes)
* - TA ID (4bytes)
* - TA CMD ID (4bytes)
 * - TA shared buf length
* (4bytes, value not beyond TA shared memory size)
* - TA shared buf
* Receive buffer:
* - TA shared buf
*
* - For TA unload debugfs interface:
* Transmit buffer:
* - TA type (4bytes)
* - TA ID (4bytes)
*/
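/*
 * Illustrative userspace sketch (an assumption for documentation only, not
 * part of the driver): one way a tool could drive the ta_load interface
 * described above. The DRI minor number, the ta_type/ta_bin/ta_bin_len
 * variables and the lack of error handling are example assumptions.
 *
 *	int fd = open("/sys/kernel/debug/dri/0/ta_if/ta_load", O_WRONLY);
 *	size_t msg_len = 2 * sizeof(uint32_t) + ta_bin_len;
 *	uint8_t *msg = malloc(msg_len);
 *
 *	memcpy(msg, &ta_type, sizeof(uint32_t));        // TA type (4 bytes)
 *	memcpy(msg + 4, &ta_bin_len, sizeof(uint32_t)); // TA bin length (4 bytes)
 *	memcpy(msg + 8, ta_bin, ta_bin_len);            // TA bin
 *	write(fd, msg, msg_len);
 *	// On success the handler copies the new TA ID back into the first
 *	// 4 bytes of the buffer passed to write().
 */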
static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
{
uint32_t ta_type = 0;
uint32_t ta_bin_len = 0;
uint8_t *ta_bin = NULL;
uint32_t copy_pos = 0;
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
struct psp_context *psp = &adev->psp;
struct ta_context *context = NULL;
if (!buf)
return -EINVAL;
ret = copy_from_user((void *)&ta_type, &buf[copy_pos], sizeof(uint32_t));
if (ret || (!is_ta_type_valid(ta_type)))
return -EFAULT;
copy_pos += sizeof(uint32_t);
ret = copy_from_user((void *)&ta_bin_len, &buf[copy_pos], sizeof(uint32_t));
if (ret)
return -EFAULT;
copy_pos += sizeof(uint32_t);
ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
if (!ta_bin)
return -ENOMEM;
if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
ret = -EFAULT;
goto err_free_bin;
}
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_terminate) {
dev_err(adev->dev, "Unsupported function to terminate TA\n");
ret = -EOPNOTSUPP;
goto err_free_bin;
}
/*
	 * Allocate the TA shared buf in case it was freed
	 * because a previous TA load failed.
*/
if (!context->mem_context.shared_buf) {
ret = psp_ta_init_shared_buf(psp, &context->mem_context);
if (ret) {
ret = -ENOMEM;
goto err_free_bin;
}
}
ret = psp_fn_ta_terminate(psp);
if (ret || context->resp_status) {
dev_err(adev->dev,
"Failed to unload embedded TA (%d) and status (0x%X)\n",
ret, context->resp_status);
if (!ret)
ret = -EINVAL;
goto err_free_ta_shared_buf;
}
/* Prepare TA context for TA initialization */
context->ta_type = ta_type;
context->bin_desc.fw_version = get_bin_version(ta_bin);
context->bin_desc.size_bytes = ta_bin_len;
context->bin_desc.start_addr = ta_bin;
if (!psp->ta_funcs->fn_ta_initialize) {
dev_err(adev->dev, "Unsupported function to initialize TA\n");
ret = -EOPNOTSUPP;
goto err_free_ta_shared_buf;
}
ret = psp_fn_ta_initialize(psp);
if (ret || context->resp_status) {
dev_err(adev->dev, "Failed to load TA via debugfs (%d) and status (0x%X)\n",
ret, context->resp_status);
if (!ret)
ret = -EINVAL;
goto err_free_ta_shared_buf;
}
if (copy_to_user((char *)buf, (void *)&context->session_id, sizeof(uint32_t)))
ret = -EFAULT;
err_free_ta_shared_buf:
	/* Only free the TA shared buf when an error code is returned */
if (ret && context->mem_context.shared_buf)
psp_ta_free_shared_buf(&context->mem_context);
err_free_bin:
kfree(ta_bin);
return ret;
}
static ssize_t ta_if_unload_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
{
uint32_t ta_type = 0;
uint32_t ta_id = 0;
uint32_t copy_pos = 0;
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
struct psp_context *psp = &adev->psp;
struct ta_context *context = NULL;
if (!buf)
return -EINVAL;
ret = copy_from_user((void *)&ta_type, &buf[copy_pos], sizeof(uint32_t));
if (ret || (!is_ta_type_valid(ta_type)))
return -EFAULT;
copy_pos += sizeof(uint32_t);
ret = copy_from_user((void *)&ta_id, &buf[copy_pos], sizeof(uint32_t));
if (ret)
return -EFAULT;
set_ta_context_funcs(psp, ta_type, &context);
context->session_id = ta_id;
if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_terminate) {
dev_err(adev->dev, "Unsupported function to terminate TA\n");
return -EOPNOTSUPP;
}
ret = psp_fn_ta_terminate(psp);
if (ret || context->resp_status) {
dev_err(adev->dev, "Failed to unload TA via debugfs (%d) and status (0x%X)\n",
ret, context->resp_status);
if (!ret)
ret = -EINVAL;
}
if (context->mem_context.shared_buf)
psp_ta_free_shared_buf(&context->mem_context);
return ret;
}
static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
{
uint32_t ta_type = 0;
uint32_t ta_id = 0;
uint32_t cmd_id = 0;
uint32_t shared_buf_len = 0;
uint8_t *shared_buf = NULL;
uint32_t copy_pos = 0;
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
struct psp_context *psp = &adev->psp;
struct ta_context *context = NULL;
if (!buf)
return -EINVAL;
ret = copy_from_user((void *)&ta_type, &buf[copy_pos], sizeof(uint32_t));
if (ret)
return -EFAULT;
copy_pos += sizeof(uint32_t);
ret = copy_from_user((void *)&ta_id, &buf[copy_pos], sizeof(uint32_t));
if (ret)
return -EFAULT;
copy_pos += sizeof(uint32_t);
ret = copy_from_user((void *)&cmd_id, &buf[copy_pos], sizeof(uint32_t));
if (ret)
return -EFAULT;
copy_pos += sizeof(uint32_t);
ret = copy_from_user((void *)&shared_buf_len, &buf[copy_pos], sizeof(uint32_t));
if (ret)
return -EFAULT;
copy_pos += sizeof(uint32_t);
shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
if (!shared_buf)
return -ENOMEM;
if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
ret = -EFAULT;
goto err_free_shared_buf;
}
set_ta_context_funcs(psp, ta_type, &context);
if (!context->initialized) {
dev_err(adev->dev, "TA is not initialized\n");
ret = -EINVAL;
goto err_free_shared_buf;
}
if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_invoke) {
dev_err(adev->dev, "Unsupported function to invoke TA\n");
ret = -EOPNOTSUPP;
goto err_free_shared_buf;
}
context->session_id = ta_id;
ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
if (ret)
goto err_free_shared_buf;
ret = psp_fn_ta_invoke(psp, cmd_id);
if (ret || context->resp_status) {
dev_err(adev->dev, "Failed to invoke TA via debugfs (%d) and status (0x%X)\n",
ret, context->resp_status);
if (!ret) {
ret = -EINVAL;
goto err_free_shared_buf;
}
}
if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
ret = -EFAULT;
err_free_shared_buf:
kfree(shared_buf);
return ret;
}
void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *dir = debugfs_create_dir("ta_if", minor->debugfs_root);
debugfs_create_file("ta_load", 0200, dir, adev,
&ta_load_debugfs_fops);
debugfs_create_file("ta_unload", 0200, dir,
adev, &ta_unload_debugfs_fops);
debugfs_create_file("ta_invoke", 0200, dir,
adev, &ta_invoke_debugfs_fops);
}
#else
void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev)
{
}
#endif
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_7.h"
#include "nbio/nbio_7_7_0_offset.h"
#include "nbio/nbio_7_7_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}
static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
return tmp;
}
static void nbio_v7_7_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, regBIF_BX1_BIF_FB_EN,
BIF_BX1_BIF_FB_EN__FB_READ_EN_MASK |
BIF_BX1_BIF_FB_EN__FB_WRITE_EN_MASK);
else
WREG32_SOC15(NBIO, 0, regBIF_BX1_BIF_FB_EN, 0);
}
static u32 nbio_v7_7_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_CONFIG_MEMSIZE);
}
static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index,
int doorbell_size)
{
u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_CSDMA_DOORBELL_RANGE);
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_CSDMA_DOORBELL_RANGE,
OFFSET, doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_CSDMA_DOORBELL_RANGE,
SIZE, doorbell_size);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_SDMA0_DOORBELL_RANGE,
SIZE, 0);
}
WREG32_PCIE_PORT(reg, doorbell_range);
}
static void nbio_v7_7_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
int doorbell_index, int instance)
{
u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_VCN0_DOORBELL_RANGE);
u32 doorbell_range = RREG32_PCIE_PORT(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET,
doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0);
}
WREG32_PCIE_PORT(reg, doorbell_range);
}
static void nbio_v7_7_enable_doorbell_aperture(struct amdgpu_device *adev,
bool enable)
{
u32 reg;
reg = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN);
reg = REG_SET_FIELD(reg, RCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN,
BIF_DOORBELL_APER_EN, enable ? 1 : 0);
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_0_RCC_DOORBELL_APER_EN, reg);
}
static void nbio_v7_7_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
bool enable)
{
u32 tmp = 0;
if (enable) {
tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_EN, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_MODE, 1) |
REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
DOORBELL_SELFRING_GPA_APER_SIZE, 0);
WREG32_SOC15(NBIO, 0,
regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
lower_32_bits(adev->doorbell.base));
WREG32_SOC15(NBIO, 0,
regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
upper_32_bits(adev->doorbell.base));
}
WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
tmp);
}
static void nbio_v7_7_ih_doorbell_range(struct amdgpu_device *adev,
bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0,
regGDC0_BIF_IH_DOORBELL_RANGE);
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
GDC0_BIF_IH_DOORBELL_RANGE, OFFSET,
doorbell_index);
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
GDC0_BIF_IH_DOORBELL_RANGE, SIZE,
2);
} else {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
GDC0_BIF_IH_DOORBELL_RANGE, SIZE,
0);
}
WREG32_SOC15(NBIO, 0, regGDC0_BIF_IH_DOORBELL_RANGE,
ih_doorbell_range);
}
static void nbio_v7_7_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
/* setup interrupt control */
WREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL2,
adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL);
/*
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX1_INTERRUPT_CNTL,
IH_DUMMY_RD_OVERRIDE, 0);
/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX1_INTERRUPT_CNTL,
IH_REQ_NONSNOOP_EN, 0);
WREG32_SOC15(NBIO, 0, regBIF_BX1_INTERRUPT_CNTL, interrupt_cntl);
}
static u32 nbio_v7_7_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
}
static u32 nbio_v7_7_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
}
static u32 nbio_v7_7_get_pcie_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_INDEX2);
}
static u32 nbio_v7_7_get_pcie_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX0_PCIE_DATA2);
}
static u32 nbio_v7_7_get_pcie_port_index_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
}
static u32 nbio_v7_7_get_pcie_port_data_offset(struct amdgpu_device *adev)
{
return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
}
const struct nbio_hdp_flush_reg nbio_v7_7_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3);
data = REG_SET_FIELD(data, BIF0_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
data = REG_SET_FIELD(data, BIF0_PCIE_MST_CTRL_3,
CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
}
static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG))
return;
def = data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
if (enable) {
data |= (BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
} else {
data &= ~(BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
BIF0_CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
}
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL, data);
}
static void nbio_v7_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
return;
def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
if (enable)
data |= BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
else
data &= ~BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2, data);
def = data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1);
if (enable) {
data |= (BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
} else {
data &= ~(BIF0_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
BIF0_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
}
if (def != data)
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_TX_POWER_CTRL_1, data);
}
static void nbio_v7_7_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags)
{
uint32_t data;
/* AMD_CG_SUPPORT_BIF_MGCG */
data = RREG32_SOC15(NBIO, 0, regBIF0_CPM_CONTROL);
if (data & BIF0_CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
*flags |= AMD_CG_SUPPORT_BIF_MGCG;
/* AMD_CG_SUPPORT_BIF_LS */
data = RREG32_SOC15(NBIO, 0, regBIF0_PCIE_CNTL2);
if (data & BIF0_PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_hdp_flush_req_offset = nbio_v7_7_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_7_get_hdp_flush_done_offset,
.get_pcie_index_offset = nbio_v7_7_get_pcie_index_offset,
.get_pcie_data_offset = nbio_v7_7_get_pcie_data_offset,
.get_pcie_port_index_offset = nbio_v7_7_get_pcie_port_index_offset,
.get_pcie_port_data_offset = nbio_v7_7_get_pcie_port_data_offset,
.get_rev_id = nbio_v7_7_get_rev_id,
.mc_access_enable = nbio_v7_7_mc_access_enable,
.get_memsize = nbio_v7_7_get_memsize,
.sdma_doorbell_range = nbio_v7_7_sdma_doorbell_range,
.vcn_doorbell_range = nbio_v7_7_vcn_doorbell_range,
.enable_doorbell_aperture = nbio_v7_7_enable_doorbell_aperture,
.enable_doorbell_selfring_aperture = nbio_v7_7_enable_doorbell_selfring_aperture,
.ih_doorbell_range = nbio_v7_7_ih_doorbell_range,
.update_medium_grain_clock_gating = nbio_v7_7_update_medium_grain_clock_gating,
.update_medium_grain_light_sleep = nbio_v7_7_update_medium_grain_light_sleep,
.get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
.remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c |
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: monk liu <[email protected]>
*/
#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>
#define to_amdgpu_ctx_entity(e) \
container_of((e), struct amdgpu_ctx_entity, entity)
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_GFX] = 1,
[AMDGPU_HW_IP_COMPUTE] = 4,
[AMDGPU_HW_IP_DMA] = 2,
[AMDGPU_HW_IP_UVD] = 1,
[AMDGPU_HW_IP_VCE] = 1,
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_NORMAL:
case AMDGPU_CTX_PRIORITY_HIGH:
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return true;
default:
return false;
}
}
static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
return DRM_SCHED_PRIORITY_UNSET;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return DRM_SCHED_PRIORITY_MIN;
case AMDGPU_CTX_PRIORITY_LOW:
return DRM_SCHED_PRIORITY_MIN;
case AMDGPU_CTX_PRIORITY_NORMAL:
return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_HIGH:
return DRM_SCHED_PRIORITY_HIGH;
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return DRM_SCHED_PRIORITY_HIGH;
	/* This should not happen as we already sanitized the userspace-provided
	 * priority, WARN if it does.
*/
default:
WARN(1, "Invalid context priority %d\n", ctx_prio);
return DRM_SCHED_PRIORITY_NORMAL;
}
}
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
int32_t priority)
{
if (!amdgpu_ctx_priority_is_valid(priority))
return -EINVAL;
/* NORMAL and below are accessible by everyone */
if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
return 0;
if (drm_is_current_master(filp))
return 0;
return -EACCES;
}
static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
{
switch (prio) {
case AMDGPU_CTX_PRIORITY_HIGH:
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
return AMDGPU_GFX_PIPE_PRIO_NORMAL;
}
}
static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
switch (prio) {
case AMDGPU_CTX_PRIORITY_HIGH:
return AMDGPU_RING_PRIO_1;
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMDGPU_RING_PRIO_2;
default:
return AMDGPU_RING_PRIO_0;
}
}
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
struct amdgpu_device *adev = ctx->mgr->adev;
unsigned int hw_prio;
int32_t ctx_prio;
ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
switch (hw_ip) {
case AMDGPU_HW_IP_GFX:
case AMDGPU_HW_IP_COMPUTE:
hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
break;
case AMDGPU_HW_IP_VCE:
case AMDGPU_HW_IP_VCN_ENC:
hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
break;
default:
hw_prio = AMDGPU_RING_PRIO_DEFAULT;
break;
}
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
hw_prio = AMDGPU_RING_PRIO_DEFAULT;
return hw_prio;
}
/* Calculate the time spent on the hw */
static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{
struct drm_sched_fence *s_fence;
if (!fence)
return ns_to_ktime(0);
	/* When the fence is not even scheduled, it can't have spent any time */
s_fence = to_drm_sched_fence(fence);
if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
return ns_to_ktime(0);
	/* When it is still running, account for how much time was already spent */
if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
return ktime_sub(s_fence->finished.timestamp,
s_fence->scheduled.timestamp);
}
static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
struct amdgpu_ctx_entity *centity)
{
ktime_t res = ns_to_ktime(0);
uint32_t i;
spin_lock(&ctx->ring_lock);
for (i = 0; i < amdgpu_sched_jobs; i++) {
res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
}
spin_unlock(&ctx->ring_lock);
return res;
}
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
const u32 ring)
{
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
struct amdgpu_device *adev = ctx->mgr->adev;
struct amdgpu_ctx_entity *entity;
enum drm_sched_priority drm_prio;
unsigned int hw_prio, num_scheds;
int32_t ctx_prio;
int r;
entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
GFP_KERNEL);
if (!entity)
return -ENOMEM;
ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
entity->hw_ip = hw_ip;
entity->sequence = 1;
hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (!adev->xcp_mgr) {
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
} else {
struct amdgpu_fpriv *fpriv;
fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr);
r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
&num_scheds, &scheds);
if (r)
goto cleanup_entity;
}
/* disable load balance if the hw engine retains context among dependent jobs */
if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
hw_ip == AMDGPU_HW_IP_VCN_DEC ||
hw_ip == AMDGPU_HW_IP_UVD_ENC ||
hw_ip == AMDGPU_HW_IP_UVD) {
sched = drm_sched_pick_best(scheds, num_scheds);
scheds = &sched;
num_scheds = 1;
}
r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
&ctx->guilty);
if (r)
goto error_free_entity;
/* It's not an error if we fail to install the new entity */
if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
goto cleanup_entity;
return 0;
cleanup_entity:
drm_sched_entity_fini(&entity->entity);
error_free_entity:
kfree(entity);
return r;
}
static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity)
{
ktime_t res = ns_to_ktime(0);
int i;
if (!entity)
return res;
for (i = 0; i < amdgpu_sched_jobs; ++i) {
res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
dma_fence_put(entity->fences[i]);
}
amdgpu_xcp_release_sched(adev, entity);
kfree(entity);
return res;
}
static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
u32 *stable_pstate)
{
struct amdgpu_device *adev = ctx->mgr->adev;
enum amd_dpm_forced_level current_level;
current_level = amdgpu_dpm_get_performance_level(adev);
switch (current_level) {
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
break;
default:
*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
break;
}
return 0;
}
static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
struct drm_file *filp, struct amdgpu_ctx *ctx)
{
struct amdgpu_fpriv *fpriv = filp->driver_priv;
u32 current_stable_pstate;
int r;
r = amdgpu_ctx_priority_permit(filp, priority);
if (r)
return r;
memset(ctx, 0, sizeof(*ctx));
kref_init(&ctx->refcount);
ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
ctx->init_priority = priority;
ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
if (r)
return r;
if (mgr->adev->pm.stable_pstate_ctx)
ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
else
ctx->stable_pstate = current_stable_pstate;
ctx->ctx_mgr = &(fpriv->ctx_mgr);
return 0;
}
static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
u32 stable_pstate)
{
struct amdgpu_device *adev = ctx->mgr->adev;
enum amd_dpm_forced_level level;
u32 current_stable_pstate;
int r;
mutex_lock(&adev->pm.stable_pstate_ctx_lock);
if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
r = -EBUSY;
goto done;
}
	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
if (r || (stable_pstate == current_stable_pstate))
goto done;
switch (stable_pstate) {
case AMDGPU_CTX_STABLE_PSTATE_NONE:
level = AMD_DPM_FORCED_LEVEL_AUTO;
break;
case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
break;
case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
break;
case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
break;
case AMDGPU_CTX_STABLE_PSTATE_PEAK:
level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
break;
default:
r = -EINVAL;
goto done;
}
r = amdgpu_dpm_force_performance_level(adev, level);
if (level == AMD_DPM_FORCED_LEVEL_AUTO)
adev->pm.stable_pstate_ctx = NULL;
else
adev->pm.stable_pstate_ctx = ctx;
done:
mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return r;
}
static void amdgpu_ctx_fini(struct kref *ref)
{
struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
struct amdgpu_ctx_mgr *mgr = ctx->mgr;
struct amdgpu_device *adev = mgr->adev;
unsigned i, j, idx;
if (!adev)
return;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
ktime_t spend;
spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]);
atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
}
}
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
drm_dev_exit(idx);
}
kfree(ctx);
}
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{
int r;
struct drm_sched_entity *ctx_entity;
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
return -EINVAL;
}
/* Right now all IPs have only one instance - multiple rings. */
if (instance != 0) {
DRM_DEBUG("invalid ip instance: %d\n", instance);
return -EINVAL;
}
if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
return -EINVAL;
}
if (ctx->entities[hw_ip][ring] == NULL) {
r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
if (r)
return r;
}
ctx_entity = &ctx->entities[hw_ip][ring]->entity;
r = drm_sched_entity_error(ctx_entity);
if (r) {
DRM_DEBUG("error entity %p\n", ctx_entity);
return r;
}
*entity = ctx_entity;
return 0;
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
int32_t priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
struct amdgpu_ctx *ctx;
int r;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_lock(&mgr->lock);
r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
return r;
}
*id = (uint32_t)r;
r = amdgpu_ctx_init(mgr, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
kfree(ctx);
}
mutex_unlock(&mgr->lock);
return r;
}
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
u32 i, j;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
if (!ctx->entities[i][j])
continue;
drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
}
}
amdgpu_ctx_fini(ref);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
struct amdgpu_ctx *ctx;
mutex_lock(&mgr->lock);
ctx = idr_remove(&mgr->ctx_handles, id);
if (ctx)
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
mutex_unlock(&mgr->lock);
return ctx ? 0 : -EINVAL;
}
static int amdgpu_ctx_query(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv, uint32_t id,
union drm_amdgpu_ctx_out *out)
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
unsigned reset_counter;
if (!fpriv)
return -EINVAL;
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (!ctx) {
mutex_unlock(&mgr->lock);
return -EINVAL;
}
/* TODO: these two are always zero */
out->state.flags = 0x0;
out->state.hangs = 0x0;
	/* determine if a GPU reset has occurred since the last call */
reset_counter = atomic_read(&adev->gpu_reset_counter);
/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
if (ctx->reset_counter_query == reset_counter)
out->state.reset_status = AMDGPU_CTX_NO_RESET;
else
out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
ctx->reset_counter_query = reset_counter;
mutex_unlock(&mgr->lock);
return 0;
}
#define AMDGPU_RAS_COUNTE_DELAY_MS 3000
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv, uint32_t id,
union drm_amdgpu_ctx_out *out)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
if (!fpriv)
return -EINVAL;
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (!ctx) {
mutex_unlock(&mgr->lock);
return -EINVAL;
}
out->state.flags = 0x0;
out->state.hangs = 0x0;
if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
if (amdgpu_in_reset(adev))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;
if (adev->ras_enabled && con) {
/* Return the cached values in O(1),
* and schedule delayed work to cache
		 * new values.
*/
int ce_count, ue_count;
ce_count = atomic_read(&con->ras_ce_count);
ue_count = atomic_read(&con->ras_ue_count);
if (ce_count != ctx->ras_counter_ce) {
ctx->ras_counter_ce = ce_count;
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
}
if (ue_count != ctx->ras_counter_ue) {
ctx->ras_counter_ue = ue_count;
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
}
schedule_delayed_work(&con->ras_counte_delay_work,
msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
}
mutex_unlock(&mgr->lock);
return 0;
}
static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv, uint32_t id,
bool set, u32 *stable_pstate)
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
int r;
if (!fpriv)
return -EINVAL;
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (!ctx) {
mutex_unlock(&mgr->lock);
return -EINVAL;
}
if (set)
r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
else
r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);
mutex_unlock(&mgr->lock);
return r;
}
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
int r;
uint32_t id, stable_pstate;
int32_t priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
id = args->in.ctx_id;
priority = args->in.priority;
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
if (!amdgpu_ctx_priority_is_valid(priority))
priority = AMDGPU_CTX_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_QUERY_STATE2:
r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
break;
case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
if (args->in.flags)
return -EINVAL;
r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
if (!r)
args->out.pstate.flags = stable_pstate;
break;
case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
return -EINVAL;
stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
return -EINVAL;
r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
break;
default:
return -EINVAL;
}
return r;
}
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
if (!fpriv)
return NULL;
mgr = &fpriv->ctx_mgr;
mutex_lock(&mgr->lock);
ctx = idr_find(&mgr->ctx_handles, id);
if (ctx)
kref_get(&ctx->refcount);
mutex_unlock(&mgr->lock);
return ctx;
}
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
if (ctx == NULL)
return -EINVAL;
kref_put(&ctx->refcount, amdgpu_ctx_do_release);
return 0;
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity,
struct dma_fence *fence)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
uint64_t seq = centity->sequence;
struct dma_fence *other = NULL;
unsigned idx = 0;
idx = seq & (amdgpu_sched_jobs - 1);
other = centity->fences[idx];
WARN_ON(other && !dma_fence_is_signaled(other));
dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
centity->fences[idx] = fence;
centity->sequence++;
spin_unlock(&ctx->ring_lock);
atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
&ctx->mgr->time_spend[centity->hw_ip]);
dma_fence_put(other);
return seq;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity,
uint64_t seq)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
if (seq == ~0ull)
seq = centity->sequence - 1;
if (seq >= centity->sequence) {
spin_unlock(&ctx->ring_lock);
return ERR_PTR(-EINVAL);
}
if (seq + amdgpu_sched_jobs < centity->sequence) {
spin_unlock(&ctx->ring_lock);
return NULL;
}
fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
}
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
struct amdgpu_ctx_entity *aentity,
int hw_ip,
int32_t priority)
{
struct amdgpu_device *adev = ctx->mgr->adev;
unsigned int hw_prio;
struct drm_gpu_scheduler **scheds = NULL;
unsigned num_scheds;
/* set sw priority */
drm_sched_entity_set_priority(&aentity->entity,
amdgpu_ctx_to_drm_sched_prio(priority));
/* set hw priority */
if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
drm_sched_entity_modify_sched(&aentity->entity, scheds,
num_scheds);
}
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
int32_t priority)
{
int32_t ctx_prio;
unsigned i, j;
ctx->override_priority = priority;
ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
if (!ctx->entities[i][j])
continue;
amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
i, ctx_prio);
}
}
}
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity)
{
struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
struct dma_fence *other;
unsigned idx;
long r;
spin_lock(&ctx->ring_lock);
idx = centity->sequence & (amdgpu_sched_jobs - 1);
other = dma_fence_get(centity->fences[idx]);
spin_unlock(&ctx->ring_lock);
if (!other)
return 0;
r = dma_fence_wait(other, true);
if (r < 0 && r != -ERESTARTSYS)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
dma_fence_put(other);
return r;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
struct amdgpu_device *adev)
{
unsigned int i;
mgr->adev = adev;
mutex_init(&mgr->lock);
idr_init_base(&mgr->ctx_handles, 1);
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
atomic64_set(&mgr->time_spend[i], 0);
}
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i, j;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
struct drm_sched_entity *entity;
if (!ctx->entities[i][j])
continue;
entity = &ctx->entities[i][j]->entity;
timeout = drm_sched_entity_flush(entity, timeout);
}
}
}
mutex_unlock(&mgr->lock);
return timeout;
}
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i, j;
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
if (kref_read(&ctx->refcount) != 1) {
DRM_ERROR("ctx %p is still alive\n", ctx);
continue;
}
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
struct drm_sched_entity *entity;
if (!ctx->entities[i][j])
continue;
entity = &ctx->entities[i][j]->entity;
drm_sched_entity_fini(entity);
}
}
}
}
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id;
amdgpu_ctx_mgr_entity_fini(mgr);
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
DRM_ERROR("ctx %p is still alive\n", ctx);
}
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
ktime_t usage[AMDGPU_HW_IP_NUM])
{
struct amdgpu_ctx *ctx;
unsigned int hw_ip, i;
uint32_t id;
/*
	 * This is a little bit racy because a ctx or a fence can be destroyed
	 * just as we try to account for them. But that is ok since exactly
	 * that case is explicitly allowed by the interface.
*/
mutex_lock(&mgr->lock);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
usage[hw_ip] = ns_to_ktime(ns);
}
idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
struct amdgpu_ctx_entity *centity;
ktime_t spend;
centity = ctx->entities[hw_ip][i];
if (!centity)
continue;
spend = amdgpu_ctx_entity_time(ctx, centity);
usage[hw_ip] = ktime_add(usage[hw_ip], spend);
}
}
}
mutex_unlock(&mgr->lock);
}
| linux-master | drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v3_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
/**
* jpeg_v3_0_early_init - set function pointers
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
*/
static int jpeg_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 harvest;
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
break;
default:
harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
return -ENOENT;
break;
}
adev->jpeg.num_jpeg_inst = 1;
adev->jpeg.num_jpeg_rings = 1;
jpeg_v3_0_set_dec_ring_funcs(adev);
jpeg_v3_0_set_irq_funcs(adev);
return 0;
}
/**
* jpeg_v3_0_sw_init - sw init for JPEG block
*
* @handle: amdgpu_device pointer
*
* Load firmware and sw initialization
*/
static int jpeg_v3_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
int r;
/* JPEG TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
if (r)
return r;
r = amdgpu_jpeg_sw_init(adev);
if (r)
return r;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
ring = adev->jpeg.inst->ring_dec;
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
ring->vm_hub = AMDGPU_MMHUB0(0);
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);
return 0;
}
/**
* jpeg_v3_0_sw_fini - sw fini for JPEG block
*
* @handle: amdgpu_device pointer
*
* JPEG suspend and free up sw allocation
*/
static int jpeg_v3_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_jpeg_suspend(adev);
if (r)
return r;
r = amdgpu_jpeg_sw_fini(adev);
return r;
}
/**
* jpeg_v3_0_hw_init - start and test JPEG block
*
* @handle: amdgpu_device pointer
*
*/
static int jpeg_v3_0_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
DRM_INFO("JPEG decode initialized successfully.\n");
return 0;
}
/**
* jpeg_v3_0_hw_fini - stop the hardware block
*
* @handle: amdgpu_device pointer
*
* Stop the JPEG block, mark ring as not ready any more
*/
static int jpeg_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
cancel_delayed_work_sync(&adev->vcn.idle_work);
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
return 0;
}
/**
* jpeg_v3_0_suspend - suspend JPEG block
*
* @handle: amdgpu_device pointer
*
* HW fini and suspend JPEG block
*/
static int jpeg_v3_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = jpeg_v3_0_hw_fini(adev);
if (r)
return r;
r = amdgpu_jpeg_suspend(adev);
return r;
}
/**
* jpeg_v3_0_resume - resume JPEG block
*
* @handle: amdgpu_device pointer
*
* Resume firmware and hw init JPEG block
*/
static int jpeg_v3_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_jpeg_resume(adev);
if (r)
return r;
r = jpeg_v3_0_hw_init(adev);
return r;
}
static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
else
data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
| JPEG_CGC_GATE__JPEG_ENC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
| JPEG_CGC_CTRL__JMCIF_MODE_MASK
| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
}
static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
|JPEG_CGC_GATE__JPEG2_DEC_MASK
|JPEG_CGC_GATE__JPEG_ENC_MASK
|JPEG_CGC_GATE__JMCIF_MASK
|JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
}
static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
{
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
uint32_t data = 0;
int r = 0;
data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
r = SOC15_WAIT_ON_RREG(JPEG, 0,
mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
if (r) {
DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
return r;
}
}
/* disable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
/* keep the JPEG in static PG mode */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);
return 0;
}
static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
{
/* enable anti hang mechanism */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);
if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
uint32_t data = 0;
int r = 0;
data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);
r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);
if (r) {
DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
return r;
}
}
return 0;
}
/**
* jpeg_v3_0_start - start JPEG block
*
* @adev: amdgpu_device pointer
*
* Setup and start the JPEG block
*/
static int jpeg_v3_0_start(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
int r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, true);
/* disable power gating */
r = jpeg_v3_0_disable_static_power_gating(adev);
if (r)
return r;
/* JPEG disable CGC */
jpeg_v3_0_disable_clock_gating(adev);
/* MJPEG global tiling registers */
WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable JMI channel */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
/* enable System Interrupt for JRBC */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
JPEG_SYS_INT_EN__DJRBC_MASK,
~JPEG_SYS_INT_EN__DJRBC_MASK);
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
return 0;
}
/**
* jpeg_v3_0_stop - stop JPEG block
*
* @adev: amdgpu_device pointer
*
* stop the JPEG block
*/
static int jpeg_v3_0_stop(struct amdgpu_device *adev)
{
int r;
/* reset JMI */
WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
UVD_JMI_CNTL__SOFT_RESET_MASK,
~UVD_JMI_CNTL__SOFT_RESET_MASK);
jpeg_v3_0_enable_clock_gating(adev);
/* enable power gating */
r = jpeg_v3_0_enable_static_power_gating(adev);
if (r)
return r;
if (adev->pm.dpm_enabled)
amdgpu_dpm_enable_jpeg(adev, false);
return 0;
}
/**
* jpeg_v3_0_dec_ring_get_rptr - get read pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware read pointer
*/
static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
}
/**
* jpeg_v3_0_dec_ring_get_wptr - get write pointer
*
* @ring: amdgpu_ring pointer
*
* Returns the current hardware write pointer
*/
static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell)
return *ring->wptr_cpu_addr;
else
return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
}
/**
* jpeg_v3_0_dec_ring_set_wptr - set write pointer
*
* @ring: amdgpu_ring pointer
*
* Commits the write pointer to the hardware
*/
static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
static bool jpeg_v3_0_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 1;
ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
return ret;
}
static int jpeg_v3_0_wait_for_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
static int jpeg_v3_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v3_0_is_idle(handle))
return -EBUSY;
jpeg_v3_0_enable_clock_gating(adev);
} else {
jpeg_v3_0_disable_clock_gating(adev);
}
return 0;
}
static int jpeg_v3_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret;
	if (state == adev->jpeg.cur_state)
return 0;
if (state == AMD_PG_STATE_GATE)
ret = jpeg_v3_0_stop(adev);
else
ret = jpeg_v3_0_start(adev);
	if (!ret)
adev->jpeg.cur_state = state;
return ret;
}
static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
enum amdgpu_interrupt_state state)
{
return 0;
}
static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_DEBUG("IH: JPEG TRAP\n");
switch (entry->src_id) {
case VCN_2_0__SRCID__JPEG_DECODE:
amdgpu_fence_process(adev->jpeg.inst->ring_dec);
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n",
entry->src_id, entry->src_data[0]);
break;
}
return 0;
}
static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
.name = "jpeg_v3_0",
.early_init = jpeg_v3_0_early_init,
.late_init = NULL,
.sw_init = jpeg_v3_0_sw_init,
.sw_fini = jpeg_v3_0_sw_fini,
.hw_init = jpeg_v3_0_hw_init,
.hw_fini = jpeg_v3_0_hw_fini,
.suspend = jpeg_v3_0_suspend,
.resume = jpeg_v3_0_resume,
.is_idle = jpeg_v3_0_is_idle,
.wait_for_idle = jpeg_v3_0_wait_for_idle,
.check_soft_reset = NULL,
.pre_soft_reset = NULL,
.soft_reset = NULL,
.post_soft_reset = NULL,
.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
.set_powergating_state = jpeg_v3_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
8 + 16,
.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v2_0_dec_ring_nop,
.insert_start = jpeg_v2_0_dec_ring_insert_start,
.insert_end = jpeg_v2_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_jpeg_ring_begin_use,
.end_use = amdgpu_jpeg_ring_end_use,
.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->ring_dec->funcs = &jpeg_v3_0_dec_ring_vm_funcs;
DRM_INFO("JPEG decode is enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
.set = jpeg_v3_0_set_interrupt_state,
.process = jpeg_v3_0_process_interrupt,
};
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->jpeg.inst->irq.num_types = 1;
adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
}
const struct amdgpu_ip_block_version jpeg_v3_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_JPEG,
.major = 3,
.minor = 0,
.rev = 0,
.funcs = &jpeg_v3_0_ip_funcs,
};
| linux-master | drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn315.h"
#include "dcn/dcn_3_1_5_offset.h"
#include "dcn/dcn_3_1_5_sh_mask.h"
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs_dcn31
#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
/* Registers. */
const struct dmub_srv_dcn31_regs dmub_srv_dcn315_regs = {
#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
{
DMUB_DCN31_REGS()
DMCUB_INTERNAL_REGS()
},
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
{ DMUB_DCN315_FIELDS() },
#undef DMUB_SF
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
{ DMUB_DCN315_FIELDS() },
#undef DMUB_SF
};
| linux-master | drivers/gpu/drm/amd/display/dmub/src/dmub_dcn315.c |
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn21.h"
#include "dcn/dcn_2_1_0_offset.h"
#include "dcn/dcn_2_1_0_sh_mask.h"
#include "renoir_ip_offset.h"
#define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs
/* Registers. */
const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
{
DMUB_COMMON_REGS()
DMCUB_INTERNAL_REGS()
},
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
{ DMUB_COMMON_FIELDS() },
#undef DMUB_SF
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
{ DMUB_COMMON_FIELDS() },
#undef DMUB_SF
};
| linux-master | drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn31.h"
#include "yellow_carp_offset.h"
#include "dcn/dcn_3_1_2_offset.h"
#include "dcn/dcn_3_1_2_sh_mask.h"
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs_dcn31
#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
const struct dmub_srv_dcn31_regs dmub_srv_dcn31_regs = {
#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
{
DMUB_DCN31_REGS()
DMCUB_INTERNAL_REGS()
},
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
};
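/*
 * Return the frame buffer base and offset, preferring values already
 * provided by the service over reading the DCN_VM_FB_LOCATION_BASE and
 * DCN_VM_FB_OFFSET registers.
 */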
static void dmub_dcn31_get_fb_base_offset(struct dmub_srv *dmub,
uint64_t *fb_base,
uint64_t *fb_offset)
{
uint32_t tmp;
if (dmub->fb_base || dmub->fb_offset) {
*fb_base = dmub->fb_base;
*fb_offset = dmub->fb_offset;
return;
}
REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
*fb_base = (uint64_t)tmp << 24;
REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
*fb_offset = (uint64_t)tmp << 24;
}
static inline void dmub_dcn31_translate_addr(const union dmub_addr *addr_in,
uint64_t fb_base,
uint64_t fb_offset,
union dmub_addr *addr_out)
{
addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
}
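/*
 * Halt the firmware with the STOP_FW GPINT command, wait for the command
 * to be acknowledged and for the core to report wait mode, then soft-reset
 * the DMCUB and clear the mailbox pointers.
 */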
void dmub_dcn31_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
const uint32_t timeout = 100;
uint32_t in_reset, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
if (in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
/**
* Timeout covers both the ACK and the wait
* for remaining work to finish.
*/
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
udelay(1);
}
for (i = 0; i < timeout; ++i) {
scratch = dmub->hw_funcs.get_gpint_response(dmub);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
udelay(1);
}
for (i = 0; i < timeout; ++i) {
REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
if (pwait_mode & (1 << 0))
break;
udelay(1);
}
/* Force the reset in case we timed out; the DMCUB is likely hung. */
}
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
/* Clear the GPINT command manually so we don't send anything during boot. */
cmd.all = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
}
void dmub_dcn31_reset_release(struct dmub_srv *dmub)
{
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}
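/*
 * Program cache windows 0 and 1 for a backdoor firmware load, translating
 * the window offsets with the frame buffer base/offset first.
 */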
void dmub_dcn31_backdoor_load(struct dmub_srv *dmub,
const struct dmub_window *cw0,
const struct dmub_window *cw1)
{
union dmub_addr offset;
uint64_t fb_base, fb_offset;
dmub_dcn31_get_fb_base_offset(dmub, &fb_base, &fb_offset);
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
dmub_dcn31_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
DMCUB_REGION3_CW0_ENABLE, 1);
dmub_dcn31_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);
REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
DMCUB_REGION3_CW1_ENABLE, 1);
REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
0x20);
}
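/*
 * Program cache windows 3 through 6; window 5 is also exposed through the
 * region 5 registers.
 */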
void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw2,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6)
{
union dmub_addr offset;
offset = cw3->offset;
REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
DMCUB_REGION3_CW3_ENABLE, 1);
offset = cw4->offset;
REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0,
DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top,
DMCUB_REGION3_CW4_ENABLE, 1);
offset = cw5->offset;
REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
DMCUB_REGION3_CW5_ENABLE, 1);
REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part);
REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0,
DMCUB_REGION5_TOP_ADDRESS,
cw5->region.top - cw5->region.base - 1,
DMCUB_REGION5_ENABLE, 1);
offset = cw6->offset;
REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
DMCUB_REGION3_CW6_ENABLE, 1);
}
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1)
{
REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_WPTR);
}
uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);
}
void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
{
REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
}
void dmub_dcn31_setup_out_mailbox(struct dmub_srv *dmub,
const struct dmub_region *outbox1)
{
REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base);
REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base);
}
uint32_t dmub_dcn31_get_outbox1_wptr(struct dmub_srv *dmub)
{
/**
 * The outbox1 wptr register is accessed without locks (dal & dc)
 * and must only be read through dmub_srv_stat_get_notification().
 */
return REG_READ(DMCUB_OUTBOX1_WPTR);
}
void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
/**
 * The outbox1 rptr register is accessed without locks (dal & dc)
 * and must only be written through dmub_srv_stat_get_notification().
 */
REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset);
}
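/*
 * The DMCUB is considered initialized once it is enabled and the firmware
 * has set the dal_fw bit in its boot status.
 */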
bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub)
{
union dmub_fw_boot_status status;
uint32_t is_enable;
status.all = REG_READ(DMCUB_SCRATCH0);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
return is_enable != 0 && status.bits.dal_fw;
}
bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
{
uint32_t supported = 0;
REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);
return supported;
}
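/* PSR-SU support requires DMCUB firmware 4.0.59 or newer. */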
bool dmub_dcn31_is_psrsu_supported(struct dmub_srv *dmub)
{
return dmub->fw_version >= DMUB_FW_VERSION(4, 0, 59);
}
void dmub_dcn31_set_gpint(struct dmub_srv *dmub,
union dmub_gpint_data_register reg)
{
REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
}
bool dmub_dcn31_is_gpint_acked(struct dmub_srv *dmub,
union dmub_gpint_data_register reg)
{
union dmub_gpint_data_register test;
reg.bits.status = 0;
test.all = REG_READ(DMCUB_GPINT_DATAIN1);
return test.all == reg.all;
}
uint32_t dmub_dcn31_get_gpint_response(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_SCRATCH7);
}
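/*
 * Read and clear the GPINT data-out register, pulsing the acknowledge bit
 * while the GPINT interrupt enable is temporarily dropped.
 */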
uint32_t dmub_dcn31_get_gpint_dataout(struct dmub_srv *dmub)
{
uint32_t dataout = REG_READ(DMCUB_GPINT_DATAOUT);
REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 0);
REG_WRITE(DMCUB_GPINT_DATAOUT, 0);
REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 1);
REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 0);
REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 1);
return dataout;
}
union dmub_fw_boot_status dmub_dcn31_get_fw_boot_status(struct dmub_srv *dmub)
{
union dmub_fw_boot_status status;
status.all = REG_READ(DMCUB_SCRATCH0);
return status;
}
union dmub_fw_boot_options dmub_dcn31_get_fw_boot_option(struct dmub_srv *dmub)
{
union dmub_fw_boot_options option;
option.all = REG_READ(DMCUB_SCRATCH14);
return option;
}
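/* Pack the hardware parameters into the boot options word in SCRATCH14. */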
void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
void dmub_dcn31_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
{
union dmub_fw_boot_options boot_options;
boot_options.all = REG_READ(DMCUB_SCRATCH14);
boot_options.bits.skip_phy_init_panel_sequence = skip;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
void dmub_dcn31_setup_outbox0(struct dmub_srv *dmub,
const struct dmub_region *outbox0)
{
REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base);
REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base);
}
uint32_t dmub_dcn31_get_outbox0_wptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_OUTBOX0_WPTR);
}
void dmub_dcn31_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset);
}
uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_TIMER_CURRENT);
}
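/*
 * Snapshot the scratch registers, fault addresses, mailbox pointers and
 * enable/reset bits for diagnostic reporting.
 */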
void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
if (!dmub || !diag_data)
return;
memset(diag_data, 0, sizeof(*diag_data));
diag_data->dmcub_version = dmub->fw_version;
diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);
diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);
diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
diag_data->is_dmcub_enabled = is_dmub_enabled;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
diag_data->is_dmcub_soft_reset = is_soft_reset;
REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
diag_data->is_dmcub_secure_reset = is_sec_reset;
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
diag_data->is_traceport_en = is_traceport_enabled;
REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
diag_data->is_cw0_enabled = is_cw0_enabled;
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
}
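/* The firmware requests a detection pass through a bit in its boot status. */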
bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
{
uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0);
bool should_detect = (fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED) != 0;
return should_detect;
}
| linux-master | drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c |
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn301.h"
#include "dcn/dcn_3_0_1_offset.h"
#include "dcn/dcn_3_0_1_sh_mask.h"
#include "vangogh_ip_offset.h"
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs
/* Registers. */
const struct dmub_srv_common_regs dmub_srv_dcn301_regs = {
#define DMUB_SR(reg) REG_OFFSET(reg),
{
DMUB_COMMON_REGS()
DMCUB_INTERNAL_REGS()
},
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
{ DMUB_COMMON_FIELDS() },
#undef DMUB_SF
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
{ DMUB_COMMON_FIELDS() },
#undef DMUB_SF
};
/* Shared functions. */
| linux-master | drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn316.h"
#include "dcn/dcn_3_1_6_offset.h"
#include "dcn/dcn_3_1_6_sh_mask.h"
#define DCN_BASE__INST0_SEG0 0x00000012
#define DCN_BASE__INST0_SEG1 0x000000C0
#define DCN_BASE__INST0_SEG2 0x000034C0
#define DCN_BASE__INST0_SEG3 0x00009000
#define DCN_BASE__INST0_SEG4 0x02403C00
#define DCN_BASE__INST0_SEG5 0
#define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg
#define CTX dmub
#define REGS dmub->regs_dcn31
#define REG_OFFSET_EXP(reg_name) (BASE(reg##reg_name##_BASE_IDX) + reg##reg_name)
/* Registers. */
const struct dmub_srv_dcn31_regs dmub_srv_dcn316_regs = {
#define DMUB_SR(reg) REG_OFFSET_EXP(reg),
{
DMUB_DCN31_REGS()
DMCUB_INTERNAL_REGS()
},
#undef DMUB_SR
#define DMUB_SF(reg, field) FD_MASK(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
#define DMUB_SF(reg, field) FD_SHIFT(reg, field),
{ DMUB_DCN31_FIELDS() },
#undef DMUB_SF
};
| linux-master | drivers/gpu/drm/amd/display/dmub/src/dmub_dcn316.c |