// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "i915_selftest.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
struct live_mocs {
struct drm_i915_mocs_table table;
struct drm_i915_mocs_table *mocs;
struct drm_i915_mocs_table *l3cc;
struct i915_vma *scratch;
void *vaddr;
};
static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
{
struct intel_context *ce;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return ce;
/* We build large requests to read the registers from the ring */
ce->ring_size = SZ_16K;
return ce;
}
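/*
 * Submit the request and synchronously wait for it to complete (up to a
 * fifth of a second), preserving any error already accumulated by the
 * caller and returning -ETIME on timeout.
 */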
static int request_add_sync(struct i915_request *rq, int err)
{
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -ETIME;
i915_request_put(rq);
return err;
}
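/*
 * Submit the request and wait for the spinner batch to start executing on
 * the GPU; returns -ETIME if the spinner never begins.
 */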
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
int err = 0;
i915_request_get(rq);
i915_request_add(rq);
if (spin && !igt_wait_for_spinner(spin, rq))
err = -ETIME;
i915_request_put(rq);
return err;
}
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
unsigned int flags;
int err;
memset(arg, 0, sizeof(*arg));
flags = get_mocs_settings(gt->i915, &arg->table);
if (!flags)
return -EINVAL;
if (flags & HAS_RENDER_L3CC)
arg->l3cc = &arg->table;
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
arg->mocs = &arg->table;
arg->scratch =
__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
arg->vaddr = i915_gem_object_pin_map_unlocked(arg->scratch->obj, I915_MAP_WB);
if (IS_ERR(arg->vaddr)) {
err = PTR_ERR(arg->vaddr);
goto err_scratch;
}
return 0;
err_scratch:
i915_vma_unpin_and_release(&arg->scratch, 0);
return err;
}
static void live_mocs_fini(struct live_mocs *arg)
{
i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}
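/*
 * Emit one MI_STORE_REGISTER_MEM (SRM) per register so that the command
 * streamer copies each MMIO value into the scratch buffer, advancing both
 * the register address and the buffer offset by a dword per iteration.
 */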
static int read_regs(struct i915_request *rq,
u32 addr, unsigned int count,
u32 *offset)
{
unsigned int i;
u32 *cs;
GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));
cs = intel_ring_begin(rq, 4 * count);
if (IS_ERR(cs))
return PTR_ERR(cs);
for (i = 0; i < count; i++) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = *offset;
*cs++ = 0;
addr += sizeof(u32);
*offset += sizeof(u32);
}
intel_ring_advance(rq, cs);
return 0;
}
static int read_mocs_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
u32 *offset)
{
struct intel_gt *gt = rq->engine->gt;
u32 addr;
if (!table)
return 0;
if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
addr = global_mocs_offset() + gt->uncore->gsi_offset;
else
addr = mocs_offset(rq->engine);
return read_regs(rq, addr, table->n_entries, offset);
}
static int read_l3cc_table(struct i915_request *rq,
const struct drm_i915_mocs_table *table,
u32 *offset)
{
u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
if (!table)
return 0;
return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}
static int check_mocs_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
u32 **vaddr)
{
unsigned int i;
u32 expect;
if (!table)
return 0;
for_each_mocs(expect, table, i) {
if (**vaddr != expect) {
pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
engine->name, i, **vaddr, expect);
return -EINVAL;
}
++*vaddr;
}
return 0;
}
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
/*
* Registers in this range are affected by the MCR selector
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
return GRAPHICS_VER(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}
static int check_l3cc_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table,
u32 **vaddr)
{
/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
unsigned int i;
u32 expect;
if (!table)
return 0;
for_each_l3cc(expect, table, i) {
if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
engine->name, i, **vaddr, expect);
return -EINVAL;
}
++*vaddr;
reg += 4;
}
return 0;
}
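/*
 * Read the MOCS (and, on the render engine, L3CC) registers back via SRM
 * into the scratch page, then compare each dword against the table the
 * driver is expected to have programmed.
 */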
static int check_mocs_engine(struct live_mocs *arg,
struct intel_context *ce)
{
struct i915_vma *vma = arg->scratch;
struct i915_request *rq;
u32 offset;
u32 *vaddr;
int err;
memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
if (!err)
err = read_mocs_table(rq, arg->mocs, &offset);
if (!err && ce->engine->class == RENDER_CLASS)
err = read_l3cc_table(rq, arg->l3cc, &offset);
offset -= i915_ggtt_offset(vma);
GEM_BUG_ON(offset > PAGE_SIZE);
err = request_add_sync(rq, err);
if (err)
return err;
/* Compare the results against the expected tables */
vaddr = arg->vaddr;
if (!err)
err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
if (!err && ce->engine->class == RENDER_CLASS)
err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
if (err)
return err;
GEM_BUG_ON(arg->vaddr + offset != vaddr);
return 0;
}
static int live_mocs_kernel(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct live_mocs mocs;
int err;
/* Basic check the system is configured with the expected mocs table */
err = live_mocs_init(&mocs, gt);
if (err)
return err;
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
err = check_mocs_engine(&mocs, engine->kernel_context);
intel_engine_pm_put(engine);
if (err)
break;
}
live_mocs_fini(&mocs);
return err;
}
static int live_mocs_clean(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct live_mocs mocs;
int err;
/* Every new context should see the same mocs table */
err = live_mocs_init(&mocs, gt);
if (err)
return err;
for_each_engine(engine, gt, id) {
struct intel_context *ce;
ce = mocs_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
}
err = check_mocs_engine(&mocs, ce);
intel_context_put(ce);
if (err)
break;
}
live_mocs_fini(&mocs);
return err;
}
static int active_engine_reset(struct intel_context *ce,
const char *reason,
bool using_guc)
{
struct igt_spinner spin;
struct i915_request *rq;
int err;
err = igt_spinner_init(&spin, ce->engine->gt);
if (err)
return err;
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
igt_spinner_fini(&spin);
return PTR_ERR(rq);
}
err = request_add_spin(rq, &spin);
if (err == 0 && !using_guc)
err = intel_engine_reset(ce->engine, reason);
/* Ensure the reset happens and kills the engine */
if (err == 0)
err = intel_selftest_wait_for_rq(rq);
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
return err;
}
static int __live_mocs_reset(struct live_mocs *mocs,
struct intel_context *ce, bool using_guc)
{
struct intel_gt *gt = ce->engine->gt;
int err;
if (intel_has_reset_engine(gt)) {
if (!using_guc) {
err = intel_engine_reset(ce->engine, "mocs");
if (err)
return err;
err = check_mocs_engine(mocs, ce);
if (err)
return err;
}
err = active_engine_reset(ce, "mocs", using_guc);
if (err)
return err;
err = check_mocs_engine(mocs, ce);
if (err)
return err;
}
if (intel_has_gpu_reset(gt)) {
intel_gt_reset(gt, ce->engine->mask, "mocs");
err = check_mocs_engine(mocs, ce);
if (err)
return err;
}
return 0;
}
static int live_mocs_reset(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct live_mocs mocs;
int err = 0;
/* Check the mocs setup is retained over per-engine and global resets */
err = live_mocs_init(&mocs, gt);
if (err)
return err;
igt_global_reset_lock(gt);
for_each_engine(engine, gt, id) {
bool using_guc = intel_engine_uses_guc(engine);
struct intel_selftest_saved_policy saved;
struct intel_context *ce;
int err2;
err = intel_selftest_modify_policy(engine, &saved,
SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
if (err)
break;
ce = mocs_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto restore;
}
intel_engine_pm_get(engine);
err = __live_mocs_reset(&mocs, ce, using_guc);
intel_engine_pm_put(engine);
intel_context_put(ce);
restore:
err2 = intel_selftest_restore_policy(engine, &saved);
if (err == 0)
err = err2;
if (err)
break;
}
igt_global_reset_unlock(gt);
live_mocs_fini(&mocs);
return err;
}
int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_mocs_kernel),
SUBTEST(live_mocs_clean),
SUBTEST(live_mocs_reset),
};
struct drm_i915_mocs_table table;
if (!get_mocs_settings(i915, &table))
return 0;
return intel_gt_live_subtests(tests, to_gt(i915));
}
/* linux-master: drivers/gpu/drm/i915/gt/selftest_mocs.c */
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
#include "gt/selftest_engine_heartbeat.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
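/*
 * Consider the request active if it has been submitted to the HW, is
 * parked on the hold list, or (for requests with an initial breadcrumb)
 * has already started executing.
 */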
static bool is_active(struct i915_request *rq)
{
if (i915_request_is_active(rq))
return true;
if (i915_request_on_hold(rq))
return true;
if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
return true;
return false;
}
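/*
 * Kick the submission tasklet and poll until the request is either
 * completed or active on the HW with no further ELSP writes pending;
 * returns -ETIME if that does not happen within the timeout.
 */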
static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
/* Ignore our own attempts to suppress excess tasklets */
tasklet_hi_schedule(&engine->sched_engine->tasklet);
timeout += jiffies;
do {
bool done = time_after(jiffies, timeout);
if (i915_request_completed(rq)) /* that was quick! */
return 0;
/* Wait until the HW has acknowledged the submission (or err) */
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
if (done)
return -ETIME;
cond_resched();
} while (1);
}
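/*
 * Wait for the engine reset to mark the hanging request with -EIO, then
 * give it a short grace period to actually complete after the submission
 * worker has been flushed.
 */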
static int wait_for_reset(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
timeout += jiffies;
do {
cond_resched();
intel_engine_flush_submission(engine);
if (READ_ONCE(engine->execlists.pending[0]))
continue;
if (i915_request_completed(rq))
break;
if (READ_ONCE(rq->fence.error))
break;
} while (time_before(jiffies, timeout));
if (rq->fence.error != -EIO) {
pr_err("%s: hanging request %llx:%lld not reset\n",
engine->name,
rq->fence.context,
rq->fence.seqno);
return -EINVAL;
}
/* Give the request a jiffie to complete after flushing the worker */
if (i915_request_wait(rq, 0,
max(0l, (long)(timeout - jiffies)) + 1) < 0) {
pr_err("%s: hanging request %llx:%lld did not complete\n",
engine->name,
rq->fence.context,
rq->fence.seqno);
return -ETIME;
}
return 0;
}
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
int err = 0;
if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
}
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ctx;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
goto out_ctx;
}
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915)) {
err = -EIO;
goto out_ctx;
}
out_ctx:
intel_context_put(ce);
if (err)
break;
}
igt_spinner_fini(&spin);
return err;
}
static int live_unlite_restore(struct intel_gt *gt, int prio)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
int err = -ENOMEM;
/*
* Check that we can correctly context switch between 2 instances
* on the same engine from the same parent context.
*/
if (igt_spinner_init(&spin, gt))
return err;
err = 0;
for_each_engine(engine, gt, id) {
struct intel_context *ce[2] = {};
struct i915_request *rq[2];
struct igt_live_test t;
int n;
if (prio && !intel_engine_has_preemption(engine))
continue;
if (!intel_engine_can_store_dword(engine))
continue;
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
break;
}
st_engine_heartbeat_disable(engine);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto err_ce;
}
err = intel_context_pin(tmp);
if (err) {
intel_context_put(tmp);
goto err_ce;
}
/*
* Setup the pair of contexts such that if we
* lite-restore using the RING_TAIL from ce[1] it
* will execute garbage from ce[0]->ring.
*/
memset(tmp->ring->vaddr,
POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */
tmp->ring->vma->size);
ce[n] = tmp;
}
GEM_BUG_ON(!ce[1]->ring->size);
intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
lrc_update_regs(ce[1], engine, ce[1]->ring->head);
rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto err_ce;
}
i915_request_get(rq[0]);
i915_request_add(rq[0]);
GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
if (!igt_wait_for_spinner(&spin, rq[0])) {
i915_request_put(rq[0]);
goto err_ce;
}
rq[1] = i915_request_create(ce[1]);
if (IS_ERR(rq[1])) {
err = PTR_ERR(rq[1]);
i915_request_put(rq[0]);
goto err_ce;
}
if (!prio) {
/*
* Ensure we do the switch to ce[1] on completion.
*
* rq[0] is already submitted, so this should reduce
* to a no-op (a wait on a request on the same engine
* uses the submit fence, not the completion fence),
* but it will install a dependency on rq[1] for rq[0]
* that will prevent the pair being reordered by
* timeslicing.
*/
i915_request_await_dma_fence(rq[1], &rq[0]->fence);
}
i915_request_get(rq[1]);
i915_request_add(rq[1]);
GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
i915_request_put(rq[0]);
if (prio) {
struct i915_sched_attr attr = {
.priority = prio,
};
/* Alternatively preempt the spinner with ce[1] */
engine->sched_engine->schedule(rq[1], &attr);
}
/* And switch back to ce[0] for good measure */
rq[0] = i915_request_create(ce[0]);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
i915_request_put(rq[1]);
goto err_ce;
}
i915_request_await_dma_fence(rq[0], &rq[1]->fence);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
i915_request_put(rq[1]);
i915_request_put(rq[0]);
err_ce:
intel_engine_flush_submission(engine);
igt_spinner_end(&spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
break;
intel_context_unpin(ce[n]);
intel_context_put(ce[n]);
}
st_engine_heartbeat_enable(engine);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
break;
}
igt_spinner_fini(&spin);
return err;
}
static int live_unlite_switch(void *arg)
{
return live_unlite_restore(arg, 0);
}
static int live_unlite_preempt(void *arg)
{
return live_unlite_restore(arg, I915_PRIORITY_MAX);
}
static int live_unlite_ring(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct igt_spinner spin;
enum intel_engine_id id;
int err = 0;
/*
* Setup a preemption event that will cause almost the entire ring
* to be unwound, potentially fooling our intel_ring_direction()
* into emitting a forward lite-restore instead of the rollback.
*/
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
for_each_engine(engine, gt, id) {
struct intel_context *ce[2] = {};
struct i915_request *rq;
struct igt_live_test t;
int n;
if (!intel_engine_has_preemption(engine))
continue;
if (!intel_engine_can_store_dword(engine))
continue;
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
break;
}
st_engine_heartbeat_disable(engine);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto err_ce;
}
err = intel_context_pin(tmp);
if (err) {
intel_context_put(tmp);
goto err_ce;
}
memset32(tmp->ring->vaddr,
0xdeadbeef, /* trigger a hang if executed */
tmp->ring->vma->size / sizeof(u32));
ce[n] = tmp;
}
/* Create max prio spinner, followed by N low prio nops */
rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ce;
}
i915_request_get(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(gt);
i915_request_put(rq);
err = -ETIME;
goto err_ce;
}
/* Fill the ring, until we will cause a wrap */
n = 0;
while (intel_ring_direction(ce[0]->ring,
rq->wa_tail,
ce[0]->ring->tail) <= 0) {
struct i915_request *tmp;
tmp = intel_context_create_request(ce[0]);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
i915_request_put(rq);
goto err_ce;
}
i915_request_add(tmp);
intel_engine_flush_submission(engine);
n++;
}
intel_engine_flush_submission(engine);
pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
engine->name, n,
ce[0]->ring->size,
ce[0]->ring->tail,
ce[0]->ring->emit,
rq->tail);
GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
rq->tail,
ce[0]->ring->tail) <= 0);
i915_request_put(rq);
/* Create a second ring to preempt the first ring after rq[0] */
rq = intel_context_create_request(ce[1]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ce;
}
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_get(rq);
i915_request_add(rq);
err = wait_for_submit(engine, rq, HZ / 2);
i915_request_put(rq);
if (err) {
pr_err("%s: preemption request was not submitted\n",
engine->name);
err = -ETIME;
}
pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
engine->name,
ce[0]->ring->tail, ce[0]->ring->emit,
ce[1]->ring->tail, ce[1]->ring->emit);
err_ce:
intel_engine_flush_submission(engine);
igt_spinner_end(&spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
break;
intel_context_unpin(ce[n]);
intel_context_put(ce[n]);
}
st_engine_heartbeat_enable(engine);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
break;
}
igt_spinner_fini(&spin);
return err;
}
static int live_pin_rewind(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* We have to be careful not to trust intel_ring too much, for example
* ring->head is updated upon retire which is out of sync with pinning
* the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
* or else we risk writing an older, stale value.
*
* To simulate this, let's apply a bit of deliberate sabotage.
*/
for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
struct intel_ring *ring;
struct igt_live_test t;
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
break;
}
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
}
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
break;
}
/* Keep the context awake while we play games */
err = i915_active_acquire(&ce->active);
if (err) {
intel_context_unpin(ce);
intel_context_put(ce);
break;
}
ring = ce->ring;
/* Poison the ring, and offset the next request from HEAD */
memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
ring->emit = ring->size / 2;
ring->tail = ring->emit;
GEM_BUG_ON(ring->head);
intel_context_unpin(ce);
/* Submit a simple nop request */
GEM_BUG_ON(intel_context_is_pinned(ce));
rq = intel_context_create_request(ce);
i915_active_release(&ce->active); /* e.g. async retire */
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
}
GEM_BUG_ON(!rq->head);
i915_request_add(rq);
/* Expect not to hang! */
if (igt_live_test_end(&t)) {
err = -EIO;
break;
}
}
return err;
}
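/*
 * Disable the submission tasklet and claim the per-engine reset bit so the
 * test can step the execlists state machine by hand; if the reset bit is
 * already owned elsewhere, wedge the GT and bail with -EBUSY.
 */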
static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
{
tasklet_disable(&engine->sched_engine->tasklet);
local_bh_disable();
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&engine->gt->reset.flags)) {
local_bh_enable();
tasklet_enable(&engine->sched_engine->tasklet);
intel_gt_set_wedged(engine->gt);
return -EBUSY;
}
return 0;
}
static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
{
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&engine->gt->reset.flags);
local_bh_enable();
tasklet_enable(&engine->sched_engine->tasklet);
}
static int live_hold_reset(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
int err = 0;
/*
* In order to support offline error capture for fast preempt reset,
* we need to decouple the guilty request and ensure that it and its
* descendants are not executed while the capture is in progress.
*/
if (!intel_has_reset_engine(gt))
return 0;
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
}
st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(gt);
err = -ETIME;
goto out;
}
/* We have our request executing, now remove it and reset */
err = engine_lock_reset_tasklet(engine);
if (err)
goto out;
engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
execlists_hold(engine, rq);
GEM_BUG_ON(!i915_request_on_hold(rq));
__intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
engine_unlock_reset_tasklet(engine);
/* Check that we do not resubmit the held request */
if (!i915_request_wait(rq, 0, HZ / 5)) {
pr_err("%s: on hold request completed!\n",
engine->name);
i915_request_put(rq);
err = -EIO;
goto out;
}
GEM_BUG_ON(!i915_request_on_hold(rq));
/* But is resubmitted on release */
execlists_unhold(engine, rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: held request did not complete!\n",
engine->name);
intel_gt_set_wedged(gt);
err = -ETIME;
}
i915_request_put(rq);
out:
st_engine_heartbeat_enable(engine);
intel_context_put(ce);
if (err)
break;
}
igt_spinner_fini(&spin);
return err;
}
static const char *error_repr(int err)
{
return err ? "bad" : "good";
}
static int live_error_interrupt(void *arg)
{
static const struct error_phase {
enum { GOOD = 0, BAD = -EIO } error[2];
} phases[] = {
{ { BAD, GOOD } },
{ { BAD, BAD } },
{ { BAD, GOOD } },
{ { GOOD, GOOD } }, /* sentinel */
};
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
* of invalid commands in user batches that will cause a GPU hang.
* This is a faster mechanism than using hangcheck/heartbeats, but
* only detects problems the HW knows about -- it will not warn when
* we kill the HW!
*
* To verify our detection and reset, we throw some invalid commands
* at the HW and wait for the interrupt.
*/
if (!intel_has_reset_engine(gt))
return 0;
for_each_engine(engine, gt, id) {
const struct error_phase *p;
int err = 0;
st_engine_heartbeat_disable(engine);
for (p = phases; p->error[0] != GOOD; p++) {
struct i915_request *client[ARRAY_SIZE(phases->error)];
u32 *cs;
int i;
memset(client, 0, sizeof(client)); /* clear every slot, not just the first */
for (i = 0; i < ARRAY_SIZE(client); i++) {
struct intel_context *ce;
struct i915_request *rq;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
rq = intel_context_create_request(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (err) {
i915_request_add(rq);
goto out;
}
}
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs)) {
i915_request_add(rq);
err = PTR_ERR(cs);
goto out;
}
if (p->error[i]) {
*cs++ = 0xdeadbeef;
*cs++ = 0xdeadbeef;
} else {
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
client[i] = i915_request_get(rq);
i915_request_add(rq);
}
err = wait_for_submit(engine, client[0], HZ / 2);
if (err) {
pr_err("%s: first request did not start within time!\n",
engine->name);
err = -ETIME;
goto out;
}
for (i = 0; i < ARRAY_SIZE(client); i++) {
if (i915_request_wait(client[i], 0, HZ / 5) < 0)
pr_debug("%s: %s request incomplete!\n",
engine->name,
error_repr(p->error[i]));
if (!i915_request_started(client[i])) {
pr_err("%s: %s request not started!\n",
engine->name,
error_repr(p->error[i]));
err = -ETIME;
goto out;
}
/* Kick the tasklet to process the error */
intel_engine_flush_submission(engine);
if (client[i]->fence.error != p->error[i]) {
pr_err("%s: %s request (%s) with wrong error code: %d\n",
engine->name,
error_repr(p->error[i]),
i915_request_completed(client[i]) ? "completed" : "running",
client[i]->fence.error);
err = -EINVAL;
goto out;
}
}
out:
for (i = 0; i < ARRAY_SIZE(client); i++)
if (client[i])
i915_request_put(client[i]);
if (err) {
pr_err("%s: failed at phase[%zd] { %d, %d }\n",
engine->name, p - phases,
p->error[0], p->error[1]);
break;
}
}
st_engine_heartbeat_enable(engine);
if (err) {
intel_gt_set_wedged(gt);
return err;
}
}
return 0;
}
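/*
 * Emit a semaphore wait on slot[idx] followed by a store that releases
 * slot[idx - 1], so each request in the chain is unblocked by the request
 * queued after it.
 */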
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
u32 *cs;
cs = intel_ring_begin(rq, 10);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_NEQ_SDD;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + 4 * idx;
*cs++ = 0;
if (idx > 0) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
*cs++ = 0;
*cs++ = 1;
} else {
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
intel_ring_advance(rq, cs);
return 0;
}
static struct i915_request *
semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
{
struct intel_context *ce;
struct i915_request *rq;
int err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return ERR_CAST(ce);
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
goto out_ce;
err = 0;
if (rq->engine->emit_init_breadcrumb)
err = rq->engine->emit_init_breadcrumb(rq);
if (err == 0)
err = emit_semaphore_chain(rq, vma, idx);
if (err == 0)
i915_request_get(rq);
i915_request_add(rq);
if (err)
rq = ERR_PTR(err);
out_ce:
intel_context_put(ce);
return rq;
}
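/*
 * Submit a kernel request that writes the final semaphore slot to unblock
 * the tail of the chain, then bump its priority and kick the tasklet so
 * the scheduler acts on it immediately.
 */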
static int
release_queue(struct intel_engine_cs *engine,
struct i915_vma *vma,
int idx, int prio)
{
struct i915_sched_attr attr = {
.priority = prio,
};
struct i915_request *rq;
u32 *cs;
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
*cs++ = 0;
*cs++ = 1;
intel_ring_advance(rq, cs);
i915_request_get(rq);
i915_request_add(rq);
local_bh_disable();
engine->sched_engine->schedule(rq, &attr);
local_bh_enable(); /* kick tasklet */
i915_request_put(rq);
return 0;
}
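/*
 * Queue a chain of semaphore waiters across every engine and release only
 * the last link; the head can therefore only complete if the scheduler
 * timeslices between the queued contexts.
 */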
static int
slice_semaphore_queue(struct intel_engine_cs *outer,
struct i915_vma *vma,
int count)
{
struct intel_engine_cs *engine;
struct i915_request *head;
enum intel_engine_id id;
int err, i, n = 0;
head = semaphore_queue(outer, vma, n++);
if (IS_ERR(head))
return PTR_ERR(head);
for_each_engine(engine, outer->gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
for (i = 0; i < count; i++) {
struct i915_request *rq;
rq = semaphore_queue(engine, vma, n++);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
i915_request_put(rq);
}
}
err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
if (err)
goto out;
if (i915_request_wait(head, 0,
2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
outer->name, count, n);
GEM_TRACE_DUMP();
intel_gt_set_wedged(outer->gt);
err = -EIO;
}
out:
i915_request_put(head);
return err;
}
static int live_timeslice_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct i915_vma *vma;
void *vaddr;
int err = 0;
/*
* If a request takes too long, we would like to give other users
* a fair go on the GPU. In particular, users may create batches
* that wait upon external input, where that input may even be
* supplied by another GPU job. To avoid blocking forever, we
* need to preempt the current task and replace it with another
* ready task.
*/
if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err)
goto err_map;
err = i915_vma_sync(vma);
if (err)
goto err_pin;
for_each_engine(engine, gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
memset(vaddr, 0, PAGE_SIZE);
st_engine_heartbeat_disable(engine);
err = slice_semaphore_queue(engine, vma, 5);
st_engine_heartbeat_enable(engine);
if (err)
goto err_pin;
if (igt_flush_test(gt->i915)) {
err = -EIO;
goto err_pin;
}
}
err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
return err;
}
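/*
 * Build a request that waits for the status-page semaphore to reach idx,
 * records RING_TIMESTAMP into slot[idx] via SRM, and then advances the
 * semaphore to idx + 1 to release the next request in the sequence.
 */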
static struct i915_request *
create_rewinder(struct intel_context *ce,
struct i915_request *wait,
void *slot, int idx)
{
const u32 offset =
i915_ggtt_offset(ce->engine->status_page.vma) +
offset_in_page(slot);
struct i915_request *rq;
u32 *cs;
int err;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
if (wait) {
err = i915_request_await_dma_fence(rq, &wait->fence);
if (err)
goto err;
}
cs = intel_ring_begin(rq, 14);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_NOOP;
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = idx;
*cs++ = offset;
*cs++ = 0;
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
*cs++ = offset + idx * sizeof(u32);
*cs++ = 0;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = offset;
*cs++ = 0;
*cs++ = idx + 1;
intel_ring_advance(rq, cs);
err = 0;
err:
i915_request_get(rq);
i915_request_add(rq);
if (err) {
i915_request_put(rq);
return ERR_PTR(err);
}
return rq;
}
static int live_timeslice_rewind(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* The usual presumption on timeslice expiration is that we replace
* the active context with another. However, given a chain of
* dependencies we may end up with replacing the context with itself,
* but only a few of those requests, forcing us to rewind the
* RING_TAIL of the original request.
*/
if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
for_each_engine(engine, gt, id) {
enum { A1, A2, B1 };
enum { X = 1, Z, Y };
struct i915_request *rq[3] = {};
struct intel_context *ce;
unsigned long timeslice;
int i, err = 0;
u32 *slot;
if (!intel_engine_has_timeslices(engine))
continue;
/*
* A:rq1 -- semaphore wait, timestamp X
* A:rq2 -- write timestamp Y
*
* B:rq1 [await A:rq1] -- write timestamp Z
*
* Force timeslice, release semaphore.
*
* Expect execution/evaluation order XZY
*/
st_engine_heartbeat_disable(engine);
timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
slot = memset32(engine->status_page.addr + 1000, 0, 4);
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto err;
}
rq[A1] = create_rewinder(ce, NULL, slot, X);
if (IS_ERR(rq[A1])) {
intel_context_put(ce);
goto err;
}
rq[A2] = create_rewinder(ce, NULL, slot, Y);
intel_context_put(ce);
if (IS_ERR(rq[A2]))
goto err;
err = wait_for_submit(engine, rq[A2], HZ / 2);
if (err) {
pr_err("%s: failed to submit first context\n",
engine->name);
goto err;
}
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto err;
}
rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
intel_context_put(ce);
if (IS_ERR(rq[2]))
goto err;
err = wait_for_submit(engine, rq[B1], HZ / 2);
if (err) {
pr_err("%s: failed to submit second context\n",
engine->name);
goto err;
}
/* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
ENGINE_TRACE(engine, "forcing tasklet for rewind\n");
while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
/* Wait for the timeslice to kick in */
del_timer(&engine->execlists.timer);
tasklet_hi_schedule(&engine->sched_engine->tasklet);
intel_engine_flush_submission(engine);
}
/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
GEM_BUG_ON(!i915_request_is_active(rq[A1]));
GEM_BUG_ON(!i915_request_is_active(rq[B1]));
GEM_BUG_ON(i915_request_is_active(rq[A2]));
/* Release the hounds! */
slot[0] = 1;
wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
for (i = 1; i <= 3; i++) {
unsigned long timeout = jiffies + HZ / 2;
while (!READ_ONCE(slot[i]) &&
time_before(jiffies, timeout))
;
if (!time_before(jiffies, timeout)) {
pr_err("%s: rq[%d] timed out\n",
engine->name, i - 1);
err = -ETIME;
goto err;
}
pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
}
/* XZY: XZ < XY */
if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
engine->name,
slot[Z] - slot[X],
slot[Y] - slot[X]);
err = -EINVAL;
}
err:
memset32(&slot[0], -1, 4);
wmb();
engine->props.timeslice_duration_ms = timeslice;
st_engine_heartbeat_enable(engine);
for (i = 0; i < 3; i++)
i915_request_put(rq[i]);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
return err;
}
return 0;
}
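/* Submit an empty kernel request, e.g. to occupy a second ELSP port. */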
static struct i915_request *nop_request(struct intel_engine_cs *engine)
{
struct i915_request *rq;
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return rq;
i915_request_get(rq);
i915_request_add(rq);
return rq;
}
static long slice_timeout(struct intel_engine_cs *engine)
{
long timeout;
/* Enough time for a timeslice to kick in, and kick out */
timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine));
/* Enough time for the nop request to complete */
timeout += HZ / 5;
return timeout + 1;
}
static int live_timeslice_queue(void *arg)
{
struct intel_gt *gt = arg;
struct drm_i915_gem_object *obj;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct i915_vma *vma;
void *vaddr;
int err = 0;
/*
* Make sure that even if ELSP[0] and ELSP[1] are filled with
* timeslicing between them disabled, we *do* enable timeslicing
* if the queue demands it. (Normally, we do not submit if
* ELSP[1] is already occupied, so must rely on timeslicing to
* eject ELSP[0] in favour of the queue.)
*/
if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err)
goto err_map;
err = i915_vma_sync(vma);
if (err)
goto err_pin;
for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct i915_request *rq, *nop;
if (!intel_engine_has_preemption(engine))
continue;
st_engine_heartbeat_disable(engine);
memset(vaddr, 0, PAGE_SIZE);
/* ELSP[0]: semaphore wait */
rq = semaphore_queue(engine, vma, 0);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_heartbeat;
}
engine->sched_engine->schedule(rq, &attr);
err = wait_for_submit(engine, rq, HZ / 2);
if (err) {
pr_err("%s: Timed out trying to submit semaphores\n",
engine->name);
goto err_rq;
}
/* ELSP[1]: nop request */
nop = nop_request(engine);
if (IS_ERR(nop)) {
err = PTR_ERR(nop);
goto err_rq;
}
err = wait_for_submit(engine, nop, HZ / 2);
i915_request_put(nop);
if (err) {
pr_err("%s: Timed out trying to submit nop\n",
engine->name);
goto err_rq;
}
GEM_BUG_ON(i915_request_completed(rq));
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Queue: semaphore signal, matching priority as semaphore */
err = release_queue(engine, vma, 1, effective_prio(rq));
if (err)
goto err_rq;
/* Wait until we ack the release_queue and start timeslicing */
do {
cond_resched();
intel_engine_flush_submission(engine);
} while (READ_ONCE(engine->execlists.pending[0]));
/* Timeslice every jiffy, so within 2 we should signal */
if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to timeslice into queue\n",
engine->name);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
memset(vaddr, 0xff, PAGE_SIZE);
err = -EIO;
}
err_rq:
i915_request_put(rq);
err_heartbeat:
st_engine_heartbeat_enable(engine);
if (err)
break;
}
err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
return err;
}
static int live_timeslice_nopreempt(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
int err = 0;
/*
* We should not timeslice into a request that is marked with
* I915_REQUEST_NOPREEMPT.
*/
if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
unsigned long timeslice;
if (!intel_engine_has_preemption(engine))
continue;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
}
st_engine_heartbeat_disable(engine);
timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
/* Create an unpreemptible spinner */
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_heartbeat;
}
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
i915_request_put(rq);
err = -ETIME;
goto out_spin;
}
set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
i915_request_put(rq);
/* Followed by a maximum priority barrier (heartbeat) */
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_spin;
}
rq = intel_context_create_request(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_spin;
}
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_get(rq);
i915_request_add(rq);
/*
* Wait until the barrier is in ELSP, and we know timeslicing
* will have been activated.
*/
if (wait_for_submit(engine, rq, HZ / 2)) {
i915_request_put(rq);
err = -ETIME;
goto out_spin;
}
/*
* Since the ELSP[0] request is unpreemptible, it should not
* allow the maximum priority barrier through. Wait long
* enough to see if it is timesliced in by mistake.
*/
if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
engine->name);
err = -EINVAL;
}
i915_request_put(rq);
out_spin:
igt_spinner_end(&spin);
out_heartbeat:
xchg(&engine->props.timeslice_duration_ms, timeslice);
st_engine_heartbeat_enable(engine);
if (err)
break;
if (igt_flush_test(gt->i915)) {
err = -EIO;
break;
}
}
igt_spinner_fini(&spin);
return err;
}
static int live_busywait_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
enum intel_engine_id id;
u32 *map;
int err;
/*
* Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
* preempt the busywaits used to synchronise between rings.
*/
ctx_hi = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx_hi))
return PTR_ERR(ctx_hi);
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx_lo)) {
err = PTR_ERR(ctx_lo);
goto err_ctx_hi;
}
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_ctx_lo;
}
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto err_obj;
}
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_map;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err)
goto err_map;
err = i915_vma_sync(vma);
if (err)
goto err_vma;
for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
u32 *cs;
if (!intel_engine_has_preemption(engine))
continue;
if (!intel_engine_can_store_dword(engine))
continue;
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_vma;
}
/*
* We create two requests. The low priority request
* busywaits on a semaphore (inside the ringbuffer where
* it should be preemptible) and the high priority request
* uses a MI_STORE_DWORD_IMM to update the semaphore value
* allowing the first request to complete. If preemption
* fails, we hang instead.
*/
lo = igt_request_alloc(ctx_lo, engine);
if (IS_ERR(lo)) {
err = PTR_ERR(lo);
goto err_vma;
}
cs = intel_ring_begin(lo, 8);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(lo);
goto err_vma;
}
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(vma);
*cs++ = 0;
*cs++ = 1;
/* XXX Do we need a flush + invalidate here? */
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma);
*cs++ = 0;
intel_ring_advance(lo, cs);
i915_request_get(lo);
i915_request_add(lo);
if (wait_for(READ_ONCE(*map), 10)) {
i915_request_put(lo);
err = -ETIMEDOUT;
goto err_vma;
}
/* Low priority request should be busywaiting now */
if (i915_request_wait(lo, 0, 1) != -ETIME) {
i915_request_put(lo);
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
goto err_vma;
}
hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
i915_request_put(lo);
goto err_vma;
}
cs = intel_ring_begin(hi, 4);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(hi);
i915_request_put(lo);
goto err_vma;
}
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(vma);
*cs++ = 0;
*cs++ = 0;
intel_ring_advance(hi, cs);
i915_request_add(hi);
if (i915_request_wait(lo, 0, HZ / 5) < 0) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to preempt semaphore busywait!\n",
engine->name);
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
i915_request_put(lo);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
GEM_BUG_ON(READ_ONCE(*map));
i915_request_put(lo);
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_vma;
}
}
err = 0;
err_vma:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
err_obj:
i915_gem_object_put(obj);
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
return err;
}
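/*
 * Look up the GEM context's intel_context for @engine (via its legacy
 * index) and create a spinner request on it, dropping the context
 * reference afterwards.
 */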
static struct i915_request *
spinner_create_request(struct igt_spinner *spin,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
u32 arb)
{
struct intel_context *ce;
struct i915_request *rq;
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
if (IS_ERR(ce))
return ERR_CAST(ce);
rq = igt_spinner_create_request(spin, ce, arb);
intel_context_put(ce);
return rq;
}
static int live_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
return -ENOMEM;
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
if (igt_spinner_init(&spin_hi, gt))
goto err_ctx_lo;
if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_spin_lo;
}
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_spin_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
goto err_spin_lo;
}
rq = spinner_create_request(&spin_hi, ctx_hi, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_spin_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
goto err_spin_lo;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_spin_lo;
}
}
err = 0;
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
return err;
}
static int live_late_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
int err = -ENOMEM;
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
return -ENOMEM;
ctx_lo = kernel_context(gt->i915, NULL);
if (!ctx_lo)
goto err_ctx_hi;
if (igt_spinner_init(&spin_hi, gt))
goto err_ctx_lo;
if (igt_spinner_init(&spin_lo, gt))
goto err_spin_hi;
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
ctx_lo->sched.priority = 1;
for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_spin_lo;
}
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_spin_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
pr_err("First context failed to start\n");
goto err_wedged;
}
rq = spinner_create_request(&spin_hi, ctx_hi, engine,
MI_NOOP);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_spin_lo;
}
i915_request_add(rq);
if (igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("Second context overtook first?\n");
goto err_wedged;
}
attr.priority = I915_PRIORITY_MAX;
engine->sched_engine->schedule(rq, &attr);
if (!igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("High priority context failed to preempt the low priority context\n");
GEM_TRACE_DUMP();
goto err_wedged;
}
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_spin_lo;
}
}
err = 0;
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
return err;
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_spin_lo;
}
struct preempt_client {
struct igt_spinner spin;
struct i915_gem_context *ctx;
};
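/*
 * A preempt_client bundles a GEM context with a spinner so that the tests
 * below can submit long-running, controllable requests at a chosen
 * priority.
 */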
static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
{
c->ctx = kernel_context(gt->i915, NULL);
if (!c->ctx)
return -ENOMEM;
if (igt_spinner_init(&c->spin, gt))
goto err_ctx;
return 0;
err_ctx:
kernel_context_close(c->ctx);
return -ENOMEM;
}
static void preempt_client_fini(struct preempt_client *c)
{
igt_spinner_fini(&c->spin);
kernel_context_close(c->ctx);
}
static int live_nopreempt(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client a, b;
enum intel_engine_id id;
int err = -ENOMEM;
/*
* Verify that we can disable preemption for an individual request
* that may be being observed and not want to be interrupted.
*/
if (preempt_client_init(gt, &a))
return -ENOMEM;
if (preempt_client_init(gt, &b))
goto err_client_a;
b.ctx->sched.priority = I915_PRIORITY_MAX;
for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
if (!intel_engine_has_preemption(engine))
continue;
engine->execlists.preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
a.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
goto err_client_b;
}
/* Low priority client, but unpreemptable! */
__set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
goto err_wedged;
}
rq_b = spinner_create_request(&b.spin,
b.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
goto err_client_b;
}
i915_request_add(rq_b);
/* B is much more important than A! (But A is unpreemptable.) */
GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
/* Wait long enough for preemption and timeslicing */
if (igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client started too early!\n");
goto err_wedged;
}
igt_spinner_end(&a.spin);
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
goto err_wedged;
}
igt_spinner_end(&b.spin);
if (engine->execlists.preempt_hang.count) {
pr_err("Preemption recorded x%d; should have been suppressed!\n",
engine->execlists.preempt_hang.count);
err = -EINVAL;
goto err_wedged;
}
if (igt_flush_test(gt->i915))
goto err_wedged;
}
err = 0;
err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
struct live_preempt_cancel {
struct intel_engine_cs *engine;
struct preempt_client a, b;
};
static int __cancel_active0(struct live_preempt_cancel *arg)
{
struct i915_request *rq;
struct igt_live_test t;
int err;
/* Preempt cancel of ELSP0 */
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
if (igt_live_test_begin(&t, arg->engine->i915,
__func__, arg->engine->name))
return -EIO;
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
return PTR_ERR(rq);
clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
err = -EIO;
goto out;
}
intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
err = wait_for_reset(arg->engine, rq, HZ / 2);
if (err) {
pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
out:
i915_request_put(rq);
if (igt_live_test_end(&t))
err = -EIO;
return err;
}
static int __cancel_active1(struct live_preempt_cancel *arg)
{
struct i915_request *rq[2] = {};
struct igt_live_test t;
int err;
/* Preempt cancel of ELSP1 */
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
if (igt_live_test_begin(&t, arg->engine->i915,
__func__, arg->engine->name))
return -EIO;
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* no preemption */
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
err = -EIO;
goto out;
}
rq[1] = spinner_create_request(&arg->b.spin,
arg->b.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq[1])) {
err = PTR_ERR(rq[1]);
goto out;
}
clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
if (err)
goto out;
intel_context_ban(rq[1]->context, rq[1]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
igt_spinner_end(&arg->a.spin);
err = wait_for_reset(arg->engine, rq[1], HZ / 2);
if (err)
goto out;
if (rq[0]->fence.error != 0) {
pr_err("Normal inflight0 request did not complete\n");
err = -EINVAL;
goto out;
}
if (rq[1]->fence.error != -EIO) {
pr_err("Cancelled inflight1 request did not report -EIO\n");
err = -EINVAL;
goto out;
}
out:
i915_request_put(rq[1]);
i915_request_put(rq[0]);
if (igt_live_test_end(&t))
err = -EIO;
return err;
}
static int __cancel_queued(struct live_preempt_cancel *arg)
{
struct i915_request *rq[3] = {};
struct igt_live_test t;
int err;
/* Full ELSP and one in the wings */
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
if (igt_live_test_begin(&t, arg->engine->i915,
__func__, arg->engine->name))
return -EIO;
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
err = -EIO;
goto out;
}
rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
if (IS_ERR(rq[1])) {
err = PTR_ERR(rq[1]);
goto out;
}
clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
if (err)
goto out;
rq[2] = spinner_create_request(&arg->b.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq[2])) {
err = PTR_ERR(rq[2]);
goto out;
}
i915_request_get(rq[2]);
err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
i915_request_add(rq[2]);
if (err)
goto out;
intel_context_ban(rq[2]->context, rq[2]);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
err = wait_for_reset(arg->engine, rq[2], HZ / 2);
if (err)
goto out;
if (rq[0]->fence.error != -EIO) {
pr_err("Cancelled inflight0 request did not report -EIO\n");
err = -EINVAL;
goto out;
}
/*
* The behaviour differs depending on whether semaphores are in use. With
* semaphores, the subsequent request is already on the hardware and is not
* cancelled, whereas without them the request is still held in the driver
* and is cancelled.
*/
if (intel_engine_has_semaphores(rq[1]->engine) &&
rq[1]->fence.error != 0) {
pr_err("Normal inflight1 request did not complete\n");
err = -EINVAL;
goto out;
}
if (rq[2]->fence.error != -EIO) {
pr_err("Cancelled queued request did not report -EIO\n");
err = -EINVAL;
goto out;
}
out:
i915_request_put(rq[2]);
i915_request_put(rq[1]);
i915_request_put(rq[0]);
if (igt_live_test_end(&t))
err = -EIO;
return err;
}
static int __cancel_hostile(struct live_preempt_cancel *arg)
{
struct i915_request *rq;
int err;
/* Preempt cancel non-preemptible spinner in ELSP0 */
if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(arg->engine->gt))
return 0;
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq))
return PTR_ERR(rq);
clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
err = -EIO;
goto out;
}
intel_context_ban(rq->context, rq);
err = intel_engine_pulse(arg->engine); /* force reset */
if (err)
goto out;
err = wait_for_reset(arg->engine, rq, HZ / 2);
if (err) {
pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
out:
i915_request_put(rq);
if (igt_flush_test(arg->engine->i915))
err = -EIO;
return err;
}
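/*
 * Bias the selftest-only reset_timeout fault injection (used by
 * __cancel_fail below) so that the forced preempt reset appears to fail,
 * leaving recovery to the heartbeat.
 */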
static void force_reset_timeout(struct intel_engine_cs *engine)
{
engine->reset_timeout.probability = 999;
atomic_set(&engine->reset_timeout.times, -1);
}
static void cancel_reset_timeout(struct intel_engine_cs *engine)
{
memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
}
static int __cancel_fail(struct live_preempt_cancel *arg)
{
struct intel_engine_cs *engine = arg->engine;
struct i915_request *rq;
int err;
if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(engine->gt))
return 0;
GEM_TRACE("%s(%s)\n", __func__, engine->name);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, engine,
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq))
return PTR_ERR(rq);
clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
err = -EIO;
goto out;
}
intel_context_set_banned(rq->context);
err = intel_engine_pulse(engine);
if (err)
goto out;
force_reset_timeout(engine);
/* force preempt reset [failure] */
while (!engine->execlists.pending[0])
intel_engine_flush_submission(engine);
del_timer_sync(&engine->execlists.preempt);
intel_engine_flush_submission(engine);
cancel_reset_timeout(engine);
/* after failure, require heartbeats to reset device */
intel_engine_set_heartbeat(engine, 1);
err = wait_for_reset(engine, rq, HZ / 2);
intel_engine_set_heartbeat(engine,
engine->defaults.heartbeat_interval_ms);
if (err) {
pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
out:
i915_request_put(rq);
if (igt_flush_test(engine->i915))
err = -EIO;
return err;
}
static int live_preempt_cancel(void *arg)
{
struct intel_gt *gt = arg;
struct live_preempt_cancel data;
enum intel_engine_id id;
int err = -ENOMEM;
/*
* To cancel an inflight context, we need to first remove it from the
* GPU. That sounds like preemption! Plus a little bit of bookkeeping.
*/
if (preempt_client_init(gt, &data.a))
return -ENOMEM;
if (preempt_client_init(gt, &data.b))
goto err_client_a;
for_each_engine(data.engine, gt, id) {
if (!intel_engine_has_preemption(data.engine))
continue;
err = __cancel_active0(&data);
if (err)
goto err_wedged;
err = __cancel_active1(&data);
if (err)
goto err_wedged;
err = __cancel_queued(&data);
if (err)
goto err_wedged;
err = __cancel_hostile(&data);
if (err)
goto err_wedged;
err = __cancel_fail(&data);
if (err)
goto err_wedged;
}
err = 0;
err_client_b:
preempt_client_fini(&data.b);
err_client_a:
preempt_client_fini(&data.a);
return err;
err_wedged:
GEM_TRACE_DUMP();
igt_spinner_end(&data.b.spin);
igt_spinner_end(&data.a.spin);
intel_gt_set_wedged(gt);
goto err_client_b;
}
static int live_suppress_self_preempt(void *arg)
{
struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client a, b;
enum intel_engine_id id;
int err = -ENOMEM;
/*
* Verify that if a preemption request does not cause a change in
* the current execution order, the preempt-to-idle injection is
* skipped and that we do not accidentally apply it after the CS
* completion event.
*/
if (intel_uc_uses_guc_submission(>->uc))
return 0; /* presume black box */
if (intel_vgpu_active(gt->i915))
return 0; /* GVT forces single port & request submission */
if (preempt_client_init(gt, &a))
return -ENOMEM;
if (preempt_client_init(gt, &b))
goto err_client_a;
for_each_engine(engine, gt, id) {
struct i915_request *rq_a, *rq_b;
int depth;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_flush_test(gt->i915))
goto err_wedged;
st_engine_heartbeat_disable(engine);
engine->execlists.preempt_hang.count = 0;
rq_a = spinner_create_request(&a.spin,
a.ctx, engine,
MI_NOOP);
if (IS_ERR(rq_a)) {
err = PTR_ERR(rq_a);
st_engine_heartbeat_enable(engine);
goto err_client_b;
}
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
pr_err("First client failed to start\n");
st_engine_heartbeat_enable(engine);
goto err_wedged;
}
/* Keep postponing the timer to avoid premature slicing */
mod_timer(&engine->execlists.timer, jiffies + HZ);
for (depth = 0; depth < 8; depth++) {
rq_b = spinner_create_request(&b.spin,
b.ctx, engine,
MI_NOOP);
if (IS_ERR(rq_b)) {
err = PTR_ERR(rq_b);
st_engine_heartbeat_enable(engine);
goto err_client_b;
}
i915_request_add(rq_b);
GEM_BUG_ON(i915_request_completed(rq_a));
engine->sched_engine->schedule(rq_a, &attr);
igt_spinner_end(&a.spin);
if (!igt_wait_for_spinner(&b.spin, rq_b)) {
pr_err("Second client failed to start\n");
st_engine_heartbeat_enable(engine);
goto err_wedged;
}
swap(a, b);
rq_a = rq_b;
}
igt_spinner_end(&a.spin);
if (engine->execlists.preempt_hang.count) {
pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n",
engine->name,
engine->execlists.preempt_hang.count,
depth);
st_engine_heartbeat_enable(engine);
err = -EINVAL;
goto err_client_b;
}
st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
goto err_wedged;
}
err = 0;
err_client_b:
preempt_client_fini(&b);
err_client_a:
preempt_client_fini(&a);
return err;
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_b;
}
static int live_chain_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct preempt_client hi, lo;
enum intel_engine_id id;
int err = -ENOMEM;
/*
* Build a chain AB...BA between two contexts (A, B) and request
* preemption of the last request. It should then complete before
* the previously submitted spinner in B.
*/
if (preempt_client_init(gt, &hi))
return -ENOMEM;
if (preempt_client_init(gt, &lo))
goto err_client_hi;
for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
struct igt_live_test t;
struct i915_request *rq;
int ring_size, count, i;
if (!intel_engine_has_preemption(engine))
continue;
rq = spinner_create_request(&lo.spin,
lo.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_get(rq);
i915_request_add(rq);
ring_size = rq->wa_tail - rq->head;
if (ring_size < 0)
ring_size += rq->ring->size;
ring_size = rq->ring->size / ring_size;
pr_debug("%s(%s): Using maximum of %d requests\n",
__func__, engine->name, ring_size);
igt_spinner_end(&lo.spin);
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
i915_request_put(rq);
goto err_wedged;
}
i915_request_put(rq);
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
goto err_wedged;
}
for_each_prime_number_from(count, 1, ring_size) {
rq = spinner_create_request(&hi.spin,
hi.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
if (!igt_wait_for_spinner(&hi.spin, rq))
goto err_wedged;
rq = spinner_create_request(&lo.spin,
lo.ctx, engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
for (i = 0; i < count; i++) {
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_add(rq);
}
rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_get(rq);
i915_request_add(rq);
engine->sched_engine->schedule(rq, &attr);
igt_spinner_end(&hi.spin);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to preempt over chain of %d\n",
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
goto err_wedged;
}
igt_spinner_end(&lo.spin);
i915_request_put(rq);
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("Failed to flush low priority chain of %d requests\n",
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
goto err_wedged;
}
i915_request_put(rq);
}
if (igt_live_test_end(&t)) {
err = -EIO;
goto err_wedged;
}
}
err = 0;
err_client_lo:
preempt_client_fini(&lo);
err_client_hi:
preempt_client_fini(&hi);
return err;
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_lo;
}
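/*
* Each gang member is a batch that busy-waits on a semaphore placed at the
* start of its own batch buffer (the first dword is initially non-zero).
* Once released, it writes zero over the first dword of the previous,
* lower priority member's batch, releasing that in turn. Releasing the
* final, highest priority member from the CPU therefore unwinds the whole
* chain.
*/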
static int create_gang(struct intel_engine_cs *engine,
struct i915_request **prev)
{
struct drm_i915_gem_object *obj;
struct intel_context *ce;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_ce;
}
vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err_obj;
cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_obj;
}
/* Semaphore target: spin until zero */
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = lower_32_bits(i915_vma_offset(vma));
*cs++ = upper_32_bits(i915_vma_offset(vma));
if (*prev) {
u64 offset = i915_vma_offset((*prev)->batch);
/* Terminate the spinner in the next lower priority batch. */
*cs++ = MI_STORE_DWORD_IMM_GEN4;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
*cs++ = 0;
}
*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_obj;
}
rq->batch = i915_vma_get(vma);
i915_request_get(rq);
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_request_add(rq);
if (err)
goto err_rq;
i915_gem_object_put(obj);
intel_context_put(ce);
rq->mock.link.next = &(*prev)->mock.link;
*prev = rq;
return 0;
err_rq:
i915_vma_put(rq->batch);
i915_request_put(rq);
err_obj:
i915_gem_object_put(obj);
err_ce:
intel_context_put(ce);
return err;
}
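/*
* Poison both rings, start a spinner on the first context and then pad its
* ring with empty requests until the tail is at least queue_sz bytes past
* the spinner. A barrier priority request on the second context must still
* be submitted promptly, exercising the unwind (and wrap) of a nearly full
* ring around the preemption event.
*/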
static int __live_preempt_ring(struct intel_engine_cs *engine,
struct igt_spinner *spin,
int queue_sz, int ring_sz)
{
struct intel_context *ce[2] = {};
struct i915_request *rq;
struct igt_live_test t;
int err = 0;
int n;
if (igt_live_test_begin(&t, engine->i915, __func__, engine->name))
return -EIO;
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto err_ce;
}
tmp->ring_size = ring_sz;
err = intel_context_pin(tmp);
if (err) {
intel_context_put(tmp);
goto err_ce;
}
memset32(tmp->ring->vaddr,
0xdeadbeef, /* trigger a hang if executed */
tmp->ring->vma->size / sizeof(u32));
ce[n] = tmp;
}
rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ce;
}
i915_request_get(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_add(rq);
if (!igt_wait_for_spinner(spin, rq)) {
intel_gt_set_wedged(engine->gt);
i915_request_put(rq);
err = -ETIME;
goto err_ce;
}
/* Fill the ring until we cause a wrap */
n = 0;
while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
struct i915_request *tmp;
tmp = intel_context_create_request(ce[0]);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
i915_request_put(rq);
goto err_ce;
}
i915_request_add(tmp);
intel_engine_flush_submission(engine);
n++;
}
intel_engine_flush_submission(engine);
pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
engine->name, queue_sz, n,
ce[0]->ring->size,
ce[0]->ring->tail,
ce[0]->ring->emit,
rq->tail);
i915_request_put(rq);
/* Create a second request to preempt the first ring */
rq = intel_context_create_request(ce[1]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ce;
}
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_get(rq);
i915_request_add(rq);
err = wait_for_submit(engine, rq, HZ / 2);
i915_request_put(rq);
if (err) {
pr_err("%s: preemption request was not submitted\n",
engine->name);
err = -ETIME;
}
pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n",
engine->name,
ce[0]->ring->tail, ce[0]->ring->emit,
ce[1]->ring->tail, ce[1]->ring->emit);
err_ce:
intel_engine_flush_submission(engine);
igt_spinner_end(spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
break;
intel_context_unpin(ce[n]);
intel_context_put(ce[n]);
}
if (igt_live_test_end(&t))
err = -EIO;
return err;
}
static int live_preempt_ring(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct igt_spinner spin;
enum intel_engine_id id;
int err = 0;
/*
* Check that we roll back large chunks of a ring in order to perform a
* preemption event. Similar to live_unlite_ring, but looking at
* ring size rather than the impact of intel_ring_direction().
*/
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
for_each_engine(engine, gt, id) {
int n;
if (!intel_engine_has_preemption(engine))
continue;
if (!intel_engine_can_store_dword(engine))
continue;
st_engine_heartbeat_disable(engine);
for (n = 0; n <= 3; n++) {
err = __live_preempt_ring(engine, &spin,
n * SZ_4K / 4, SZ_4K);
if (err)
break;
}
st_engine_heartbeat_enable(engine);
if (err)
break;
}
igt_spinner_fini(&spin);
return err;
}
static int live_preempt_gang(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* Build as long a chain of preempters as we can, with each
* request higher priority than the last. Once we are ready, we release
* the last batch, which then percolates down the chain, each releasing
* the next oldest in turn. The intent is to simply push as hard as we
* can with the number of preemptions, trying to exceed narrow HW
* limits. At a minimum, we insist that we can sort all the user
* high priority levels into execution order.
*/
for_each_engine(engine, gt, id) {
struct i915_request *rq = NULL;
struct igt_live_test t;
IGT_TIMEOUT(end_time);
int prio = 0;
int err = 0;
u32 *cs;
if (!intel_engine_has_preemption(engine))
continue;
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
return -EIO;
do {
struct i915_sched_attr attr = { .priority = prio++ };
err = create_gang(engine, &rq);
if (err)
break;
/* Submit each spinner at increasing priority */
engine->sched_engine->schedule(rq, &attr);
} while (prio <= I915_PRIORITY_MAX &&
!__igt_timeout(end_time, NULL));
pr_debug("%s: Preempt chain of %d requests\n",
engine->name, prio);
/*
* The last spinner submitted has the highest priority and should execute
* first. When that spinner completes, it terminates the next lowest
* spinner, and so on until there are no more spinners and the gang is
* complete.
*/
cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
if (!IS_ERR(cs)) {
*cs = 0;
i915_gem_object_unpin_map(rq->batch->obj);
} else {
err = PTR_ERR(cs);
intel_gt_set_wedged(gt);
}
while (rq) { /* wait for each rq from highest to lowest prio */
struct i915_request *n = list_next_entry(rq, mock.link);
if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(engine->i915->drm.dev);
pr_err("Failed to flush chain of %d requests, at %d\n",
prio, rq_prio(rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
err = -ETIME;
}
i915_vma_put(rq->batch);
i915_request_put(rq);
rq = n;
}
if (igt_live_test_end(&t))
err = -EIO;
if (err)
return err;
}
return 0;
}
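/*
* Build a user batch that, for each GPR, adds the constant held in GPR(0)
* to the context-saved GPR(i), stores the result into this client's slot
* in the shared result buffer and then waits for the semaphore in the
* first dword of that buffer to reach at least the loop index. If the
* batch were ever replayed from an earlier point after a preemption, the
* increment would be repeated and a value greater than 1 would be stored.
*/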
static struct i915_vma *
create_gpr_user(struct intel_engine_cs *engine,
struct i915_vma *result,
unsigned int offset)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cs;
int err;
int i;
obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, result->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err) {
i915_vma_put(vma);
return ERR_PTR(err);
}
cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(vma);
return ERR_CAST(cs);
}
/* All GPRs are cleared for new contexts. We use GPR(0) as a constant */
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = CS_GPR(engine, 0);
*cs++ = 1;
for (i = 1; i < NUM_GPR; i++) {
u64 addr;
/*
* Perform: GPR[i]++
*
* As we read and write into the context saved GPR[i], if
* we restart this batch buffer from an earlier point, we
* will repeat the increment and store a value > 1.
*/
*cs++ = MI_MATH(4);
*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i));
*cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0));
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = CS_GPR(engine, 2 * i);
*cs++ = lower_32_bits(addr);
*cs++ = upper_32_bits(addr);
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = i;
*cs++ = lower_32_bits(i915_vma_offset(result));
*cs++ = upper_32_bits(i915_vma_offset(result));
}
*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
return vma;
}
static struct i915_vma *create_global(struct intel_gt *gt, size_t sz)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
err = i915_ggtt_pin(vma, NULL, 0, 0);
if (err) {
i915_vma_put(vma);
return ERR_PTR(err);
}
return vma;
}
static struct i915_request *
create_gpr_client(struct intel_engine_cs *engine,
struct i915_vma *global,
unsigned int offset)
{
struct i915_vma *batch, *vma;
struct intel_context *ce;
struct i915_request *rq;
int err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return ERR_CAST(ce);
vma = i915_vma_instance(global->obj, ce->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out_ce;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto out_ce;
batch = create_gpr_user(engine, vma, offset);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto out_vma;
}
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_batch;
}
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
i915_vma_lock(batch);
if (!err)
err = i915_vma_move_to_active(batch, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
i915_vma_offset(batch),
PAGE_SIZE, 0);
i915_vma_unlock(batch);
i915_vma_unpin(batch);
if (!err)
i915_request_get(rq);
i915_request_add(rq);
out_batch:
i915_vma_put(batch);
out_vma:
i915_vma_unpin(vma);
out_ce:
intel_context_put(ce);
return err ? ERR_PTR(err) : rq;
}
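/*
* Submit a maximum priority kernel request that writes @id into the shared
* semaphore at the start of @global, both forcing a preemption of the user
* clients and advancing their semaphore waits by one step.
*/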
static int preempt_user(struct intel_engine_cs *engine,
struct i915_vma *global,
int id)
{
struct i915_sched_attr attr = {
.priority = I915_PRIORITY_MAX
};
struct i915_request *rq;
int err = 0;
u32 *cs;
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(global);
*cs++ = 0;
*cs++ = id;
intel_ring_advance(rq, cs);
i915_request_get(rq);
i915_request_add(rq);
engine->sched_engine->schedule(rq, &attr);
if (i915_request_wait(rq, 0, HZ / 2) < 0)
err = -ETIME;
i915_request_put(rq);
return err;
}
static int live_preempt_user(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_vma *global;
enum intel_engine_id id;
u32 *result;
int err = 0;
/*
* In our other tests, we look at preemption in carefully
* controlled conditions in the ringbuffer. Since most of the
* time is spent in user batches, most of our preemptions naturally
* occur there. We want to verify that when we preempt inside a batch
* we continue on from the current instruction and do not roll back
* to the start, or another earlier arbitration point.
*
* To verify this, we create a batch which is a mixture of
* MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with
* a few preempting contexts thrown into the mix, we look for any
* repeated instructions (which show up as incorrect values).
*/
global = create_global(gt, 4096);
if (IS_ERR(global))
return PTR_ERR(global);
result = i915_gem_object_pin_map_unlocked(global->obj, I915_MAP_WC);
if (IS_ERR(result)) {
i915_vma_unpin_and_release(&global, 0);
return PTR_ERR(result);
}
for_each_engine(engine, gt, id) {
struct i915_request *client[3] = {};
struct igt_live_test t;
int i;
if (!intel_engine_has_preemption(engine))
continue;
if (GRAPHICS_VER(gt->i915) == 8 && engine->class != RENDER_CLASS)
continue; /* we need per-context GPR */
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
break;
}
memset(result, 0, 4096);
for (i = 0; i < ARRAY_SIZE(client); i++) {
struct i915_request *rq;
rq = create_gpr_client(engine, global,
NUM_GPR * i * sizeof(u32));
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto end_test;
}
client[i] = rq;
}
/* Continuously preempt the set of 3 running contexts */
for (i = 1; i <= NUM_GPR; i++) {
err = preempt_user(engine, global, i);
if (err)
goto end_test;
}
if (READ_ONCE(result[0]) != NUM_GPR) {
pr_err("%s: Failed to release semaphore\n",
engine->name);
err = -EIO;
goto end_test;
}
for (i = 0; i < ARRAY_SIZE(client); i++) {
int gpr;
if (i915_request_wait(client[i], 0, HZ / 2) < 0) {
err = -ETIME;
goto end_test;
}
for (gpr = 1; gpr < NUM_GPR; gpr++) {
if (result[NUM_GPR * i + gpr] != 1) {
pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n",
engine->name,
i, gpr, result[NUM_GPR * i + gpr]);
err = -EINVAL;
goto end_test;
}
}
}
end_test:
for (i = 0; i < ARRAY_SIZE(client); i++) {
if (!client[i])
break;
i915_request_put(client[i]);
}
/* Flush the semaphores on error */
smp_store_mb(result[0], -1);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
break;
}
i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP);
return err;
}
static int live_preempt_timeout(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
struct igt_spinner spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
/*
* Check that we force preemption to occur by cancelling the previous
* context if it refuses to yield the GPU.
*/
if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(gt))
return 0;
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
return -ENOMEM;
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
if (!ctx_lo)
goto err_ctx_hi;
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
if (igt_spinner_init(&spin_lo, gt))
goto err_ctx_lo;
for_each_engine(engine, gt, id) {
unsigned long saved_timeout;
struct i915_request *rq;
if (!intel_engine_has_preemption(engine))
continue;
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_spin_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
intel_gt_set_wedged(gt);
err = -EIO;
goto err_spin_lo;
}
rq = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_spin_lo;
}
/* Flush the previous CS ack before changing timeouts */
while (READ_ONCE(engine->execlists.pending[0]))
cpu_relax();
saved_timeout = engine->props.preempt_timeout_ms;
engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */
i915_request_get(rq);
i915_request_add(rq);
intel_engine_flush_submission(engine);
engine->props.preempt_timeout_ms = saved_timeout;
if (i915_request_wait(rq, 0, HZ / 10) < 0) {
intel_gt_set_wedged(gt);
i915_request_put(rq);
err = -ETIME;
goto err_spin_lo;
}
igt_spinner_end(&spin_lo);
i915_request_put(rq);
}
err = 0;
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
return err;
}
static int random_range(struct rnd_state *rnd, int min, int max)
{
return i915_prandom_u32_max_state(max - min, rnd) + min;
}
static int random_priority(struct rnd_state *rnd)
{
return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}
struct preempt_smoke {
struct intel_gt *gt;
struct kthread_work work;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch;
unsigned int ncontext;
struct rnd_state prng;
unsigned long count;
int result;
};
static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
&smoke->prng)];
}
static int smoke_submit(struct preempt_smoke *smoke,
struct i915_gem_context *ctx, int prio,
struct drm_i915_gem_object *batch)
{
struct i915_request *rq;
struct i915_vma *vma = NULL;
int err = 0;
if (batch) {
struct i915_address_space *vm;
vm = i915_gem_context_get_eb_vm(ctx);
vma = i915_vma_instance(batch, vm, NULL);
i915_vm_put(vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
return err;
}
ctx->sched.priority = prio;
rq = igt_request_alloc(ctx, smoke->engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin;
}
if (vma) {
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
i915_vma_offset(vma),
PAGE_SIZE, 0);
}
i915_request_add(rq);
unpin:
if (vma)
i915_vma_unpin(vma);
return err;
}
static void smoke_crescendo_work(struct kthread_work *work)
{
struct preempt_smoke *smoke = container_of(work, typeof(*smoke), work);
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
struct i915_gem_context *ctx = smoke_context(smoke);
smoke->result = smoke_submit(smoke, ctx,
count % I915_PRIORITY_MAX,
smoke->batch);
count++;
} while (!smoke->result && count < smoke->ncontext &&
!__igt_timeout(end_time, NULL));
smoke->count = count;
}
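/*
* Run one crescendo worker per engine, each repeatedly submitting a request
* from a randomly chosen context at monotonically increasing priority
* (modulo I915_PRIORITY_MAX), and then gather the per-engine results.
*/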
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
struct kthread_worker *worker[I915_NUM_ENGINES] = {};
struct preempt_smoke *arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long count;
int err = 0;
arg = kmalloc_array(I915_NUM_ENGINES, sizeof(*arg), GFP_KERNEL);
if (!arg)
return -ENOMEM;
memset(arg, 0, I915_NUM_ENGINES * sizeof(*arg));
for_each_engine(engine, smoke->gt, id) {
arg[id] = *smoke;
arg[id].engine = engine;
if (!(flags & BATCH))
arg[id].batch = NULL;
arg[id].count = 0;
worker[id] = kthread_create_worker(0, "igt/smoke:%d", id);
if (IS_ERR(worker[id])) {
err = PTR_ERR(worker[id]);
break;
}
kthread_init_work(&arg[id].work, smoke_crescendo_work);
kthread_queue_work(worker[id], &arg[id].work);
}
count = 0;
for_each_engine(engine, smoke->gt, id) {
if (IS_ERR_OR_NULL(worker[id]))
continue;
kthread_flush_work(&arg[id].work);
if (arg[id].result && !err)
err = arg[id].result;
count += arg[id].count;
kthread_destroy_worker(worker[id]);
}
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags, smoke->gt->info.num_engines, smoke->ncontext);
kfree(arg);
return err;
}
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
enum intel_engine_id id;
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
for_each_engine(smoke->engine, smoke->gt, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
int err;
err = smoke_submit(smoke,
ctx, random_priority(&smoke->prng),
flags & BATCH ? smoke->batch : NULL);
if (err)
return err;
count++;
}
} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags, smoke->gt->info.num_engines, smoke->ncontext);
return 0;
}
static int live_preempt_smoke(void *arg)
{
struct preempt_smoke smoke = {
.gt = arg,
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 256,
};
const unsigned int phase[] = { 0, BATCH };
struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
smoke.contexts = kmalloc_array(smoke.ncontext,
sizeof(*smoke.contexts),
GFP_KERNEL);
if (!smoke.contexts)
return -ENOMEM;
smoke.batch =
i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
err = PTR_ERR(smoke.batch);
goto err_free;
}
cs = i915_gem_object_pin_map_unlocked(smoke.batch, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_batch;
}
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs[n] = MI_ARB_CHECK;
cs[n] = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) {
err = -EIO;
goto err_batch;
}
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.gt->i915, NULL);
if (!smoke.contexts[n])
goto err_ctx;
}
for (n = 0; n < ARRAY_SIZE(phase); n++) {
err = smoke_crescendo(&smoke, phase[n]);
if (err)
goto err_ctx;
err = smoke_random(&smoke, phase[n]);
if (err)
goto err_ctx;
}
err_ctx:
if (igt_live_test_end(&t))
err = -EIO;
for (n = 0; n < smoke.ncontext; n++) {
if (!smoke.contexts[n])
break;
kernel_context_close(smoke.contexts[n]);
}
err_batch:
i915_gem_object_put(smoke.batch);
err_free:
kfree(smoke.contexts);
return err;
}
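/*
* Time the submission of batches of empty requests to @nctx virtual engines
* built from the same set of siblings, either interleaving the contexts on
* each pass or, with CHAIN, submitting each context's requests back to back.
*/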
static int nop_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling,
unsigned int nctx,
unsigned int flags)
#define CHAIN BIT(0)
{
IGT_TIMEOUT(end_time);
struct i915_request *request[16] = {};
struct intel_context *ve[16];
unsigned long n, prime, nc;
struct igt_live_test t;
ktime_t times[2] = {};
int err;
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) {
ve[n] = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve[n])) {
err = PTR_ERR(ve[n]);
nctx = n;
goto out;
}
err = intel_context_pin(ve[n]);
if (err) {
intel_context_put(ve[n]);
nctx = n;
goto out;
}
}
err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name);
if (err)
goto out;
for_each_prime_number_from(prime, 1, 8192) {
times[1] = ktime_get_raw();
if (flags & CHAIN) {
for (nc = 0; nc < nctx; nc++) {
for (n = 0; n < prime; n++) {
struct i915_request *rq;
rq = i915_request_create(ve[nc]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
if (request[nc])
i915_request_put(request[nc]);
request[nc] = i915_request_get(rq);
i915_request_add(rq);
}
}
} else {
for (n = 0; n < prime; n++) {
for (nc = 0; nc < nctx; nc++) {
struct i915_request *rq;
rq = i915_request_create(ve[nc]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
if (request[nc])
i915_request_put(request[nc]);
request[nc] = i915_request_get(rq);
i915_request_add(rq);
}
}
}
for (nc = 0; nc < nctx; nc++) {
if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
pr_err("%s(%s): wait for %llx:%lld timed out\n",
__func__, ve[0]->engine->name,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
__func__, ve[0]->engine->name,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
break;
}
}
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
times[0] = times[1];
for (nc = 0; nc < nctx; nc++) {
i915_request_put(request[nc]);
request[nc] = NULL;
}
if (__igt_timeout(end_time, NULL))
break;
}
err = igt_live_test_end(&t);
if (err)
goto out;
pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
prime, div64_u64(ktime_to_ns(times[1]), prime));
out:
if (igt_flush_test(gt->i915))
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
i915_request_put(request[nc]);
intel_context_unpin(ve[nc]);
intel_context_put(ve[nc]);
}
return err;
}
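/*
* Collect the physical engines of @class on @gt that pass the optional
* @filter, returning the number of siblings found.
*/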
static unsigned int
__select_siblings(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings,
bool (*filter)(const struct intel_engine_cs *))
{
unsigned int n = 0;
unsigned int inst;
for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
if (!gt->engine_class[class][inst])
continue;
if (filter && !filter(gt->engine_class[class][inst]))
continue;
siblings[n++] = gt->engine_class[class][inst];
}
return n;
}
static unsigned int
select_siblings(struct intel_gt *gt,
unsigned int class,
struct intel_engine_cs **siblings)
{
return __select_siblings(gt, class, siblings, NULL);
}
static int live_virtual_engine(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int class;
int err;
if (intel_uc_uses_guc_submission(>->uc))
return 0;
for_each_engine(engine, gt, id) {
err = nop_virtual_engine(gt, &engine, 1, 1, 0);
if (err) {
pr_err("Failed to wrap engine %s: err=%d\n",
engine->name, err);
return err;
}
}
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, n;
nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
for (n = 1; n <= nsibling + 1; n++) {
err = nop_virtual_engine(gt, siblings, nsibling,
n, 0);
if (err)
return err;
}
err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
if (err)
return err;
}
return 0;
}
static int mask_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
struct intel_context *ve;
struct igt_live_test t;
unsigned int n;
int err;
/*
* Check that by setting the execution mask on a request, we can
* restrict it to our desired engine within the virtual engine.
*/
ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_close;
}
err = intel_context_pin(ve);
if (err)
goto out_put;
err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
if (err)
goto out_unpin;
for (n = 0; n < nsibling; n++) {
request[n] = i915_request_create(ve);
if (IS_ERR(request[n])) {
err = PTR_ERR(request[n]);
nsibling = n;
goto out;
}
/* Reverse order as it's more likely to be unnatural */
request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
i915_request_get(request[n]);
i915_request_add(request[n]);
}
for (n = 0; n < nsibling; n++) {
if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
pr_err("%s(%s): wait for %llx:%lld timed out\n",
__func__, ve->engine->name,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
__func__, ve->engine->name,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
goto out;
}
if (request[n]->engine != siblings[nsibling - n - 1]) {
pr_err("Executed on wrong sibling '%s', expected '%s'\n",
request[n]->engine->name,
siblings[nsibling - n - 1]->name);
err = -EINVAL;
goto out;
}
}
err = igt_live_test_end(&t);
out:
if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < nsibling; n++)
i915_request_put(request[n]);
out_unpin:
intel_context_unpin(ve);
out_put:
intel_context_put(ve);
out_close:
return err;
}
static int live_virtual_mask(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
unsigned int class;
int err;
if (intel_uc_uses_guc_submission(>->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
err = mask_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
}
return 0;
}
static int slicein_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
const long timeout = slice_timeout(siblings[0]);
struct intel_context *ce;
struct i915_request *rq;
struct igt_spinner spin;
unsigned int n;
int err = 0;
/*
* Virtual requests must take part in timeslicing on the target engines.
*/
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
for (n = 0; n < nsibling; n++) {
ce = intel_context_create(siblings[n]);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
i915_request_add(rq);
}
ce = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
rq = intel_context_create_request(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, timeout) < 0) {
GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
__func__, rq->engine->name);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
}
i915_request_put(rq);
out:
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915))
err = -EIO;
igt_spinner_fini(&spin);
return err;
}
static int sliceout_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
const long timeout = slice_timeout(siblings[0]);
struct intel_context *ce;
struct i915_request *rq;
struct igt_spinner spin;
unsigned int n;
int err = 0;
/*
* Virtual requests must allow others a fair timeslice.
*/
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
/* XXX We do not handle oversubscription and fairness with normal rq */
for (n = 0; n < nsibling; n++) {
ce = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
i915_request_add(rq);
}
for (n = 0; !err && n < nsibling; n++) {
ce = intel_context_create(siblings[n]);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
rq = intel_context_create_request(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, timeout) < 0) {
GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
__func__, siblings[n]->name);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
}
i915_request_put(rq);
}
out:
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915))
err = -EIO;
igt_spinner_fini(&spin);
return err;
}
static int live_virtual_slice(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
unsigned int class;
int err;
if (intel_uc_uses_guc_submission(>->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
unsigned int nsibling;
nsibling = __select_siblings(gt, class, siblings,
intel_engine_has_timeslices);
if (nsibling < 2)
continue;
err = slicein_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
err = sliceout_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
}
return 0;
}
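/*
* Submit a chain of requests on a virtual engine, each pinned to a different
* sibling: every request stores the current value of one CS_GPR to a scratch
* page and then programs the next GPR for its successor. The scratch page
* only holds the expected sequence if the GPR state in the context image
* followed the requests from one physical engine to the next.
*/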
static int preserved_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
struct i915_request *last = NULL;
struct intel_context *ve;
struct i915_vma *scratch;
struct igt_live_test t;
unsigned int n;
int err = 0;
u32 *cs;
scratch =
__vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
PAGE_SIZE);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
err = i915_vma_sync(scratch);
if (err)
goto out_scratch;
ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_scratch;
}
err = intel_context_pin(ve);
if (err)
goto out_put;
err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
if (err)
goto out_unpin;
for (n = 0; n < NUM_GPR_DW; n++) {
struct intel_engine_cs *engine = siblings[n % nsibling];
struct i915_request *rq;
rq = i915_request_create(ve);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_end;
}
i915_request_put(last);
last = i915_request_get(rq);
cs = intel_ring_begin(rq, 8);
if (IS_ERR(cs)) {
i915_request_add(rq);
err = PTR_ERR(cs);
goto out_end;
}
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = CS_GPR(engine, n);
*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
*cs++ = 0;
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
*cs++ = n + 1;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
/* Restrict this request to run on a particular engine */
rq->execution_mask = engine->mask;
i915_request_add(rq);
}
if (i915_request_wait(last, 0, HZ / 5) < 0) {
err = -ETIME;
goto out_end;
}
cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto out_end;
}
for (n = 0; n < NUM_GPR_DW; n++) {
if (cs[n] != n) {
pr_err("Incorrect value[%d] found for GPR[%d]\n",
cs[n], n);
err = -EINVAL;
break;
}
}
i915_gem_object_unpin_map(scratch->obj);
out_end:
if (igt_live_test_end(&t))
err = -EIO;
i915_request_put(last);
out_unpin:
intel_context_unpin(ve);
out_put:
intel_context_put(ve);
out_scratch:
i915_vma_unpin_and_release(&scratch, 0);
return err;
}
static int live_virtual_preserved(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
unsigned int class;
/*
* Check that the context image retains non-privileged (user) registers
* from one engine to the next. For this we check that the CS_GPR
* registers are preserved.
*/
if (intel_uc_uses_guc_submission(>->uc))
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
if (GRAPHICS_VER(gt->i915) < 9)
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, err;
nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
err = preserved_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
}
return 0;
}
static int reset_virtual_engine(struct intel_gt *gt,
struct intel_engine_cs **siblings,
unsigned int nsibling)
{
struct intel_engine_cs *engine;
struct intel_context *ve;
struct igt_spinner spin;
struct i915_request *rq;
unsigned int n;
int err = 0;
/*
* In order to support offline error capture for fast preempt reset,
* we need to decouple the guilty request and ensure that it and its
* descendants are not executed while the capture is in progress.
*/
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_spin;
}
for (n = 0; n < nsibling; n++)
st_engine_heartbeat_disable(siblings[n]);
rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_heartbeat;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(gt);
err = -ETIME;
goto out_heartbeat;
}
engine = rq->engine;
GEM_BUG_ON(engine == ve->engine);
/* Take ownership of the reset and tasklet */
err = engine_lock_reset_tasklet(engine);
if (err)
goto out_heartbeat;
engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Fake a preemption event; failed of course */
spin_lock_irq(&engine->sched_engine->lock);
__unwind_incomplete_requests(engine);
spin_unlock_irq(&engine->sched_engine->lock);
GEM_BUG_ON(rq->engine != engine);
/* Reset the engine while keeping our active request on hold */
execlists_hold(engine, rq);
GEM_BUG_ON(!i915_request_on_hold(rq));
__intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
/* Release our grasp on the engine, letting CS flow again */
engine_unlock_reset_tasklet(engine);
/* Check that we do not resubmit the held request */
i915_request_get(rq);
if (!i915_request_wait(rq, 0, HZ / 5)) {
pr_err("%s: on hold request completed!\n",
engine->name);
intel_gt_set_wedged(gt);
err = -EIO;
goto out_rq;
}
GEM_BUG_ON(!i915_request_on_hold(rq));
/* But is resubmitted on release */
execlists_unhold(engine, rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: held request did not complete!\n",
engine->name);
intel_gt_set_wedged(gt);
err = -ETIME;
}
out_rq:
i915_request_put(rq);
out_heartbeat:
for (n = 0; n < nsibling; n++)
st_engine_heartbeat_enable(siblings[n]);
intel_context_put(ve);
out_spin:
igt_spinner_fini(&spin);
return err;
}
static int live_virtual_reset(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
unsigned int class;
/*
* Check that we handle a reset event within a virtual engine.
* Only the physical engine is reset, but we have to check the flow
* of the virtual requests around the reset, and make sure it is not
* forgotten.
*/
if (intel_uc_uses_guc_submission(>->uc))
return 0;
if (!intel_has_reset_engine(gt))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
int nsibling, err;
nsibling = select_siblings(gt, class, siblings);
if (nsibling < 2)
continue;
err = reset_virtual_engine(gt, siblings, nsibling);
if (err)
return err;
}
return 0;
}
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
SUBTEST(live_unlite_ring),
SUBTEST(live_pin_rewind),
SUBTEST(live_hold_reset),
SUBTEST(live_error_interrupt),
SUBTEST(live_timeslice_preempt),
SUBTEST(live_timeslice_rewind),
SUBTEST(live_timeslice_queue),
SUBTEST(live_timeslice_nopreempt),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_nopreempt),
SUBTEST(live_preempt_cancel),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_ring),
SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_user),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_slice),
SUBTEST(live_virtual_reset),
};
if (to_gt(i915)->submission_method != INTEL_SUBMISSION_ELSP)
return 0;
if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return intel_gt_live_subtests(tests, to_gt(i915));
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_execlists.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
*/
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_gt_regs.h"
#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_engine_user.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "uc/intel_guc_submission.h"
/* Haswell does have the CXT_SIZE register, however it does not appear to be
* valid. The docs instead describe the context object contents in dwords. The full
* size is 70720 bytes, however, the power context and execlist context will
* never be saved (power context is stored elsewhere, and execlists don't work
* on HSW) - so the final size, including the extra state required for the
* Resource Streamer, is 66944 bytes, which rounds to 17 pages.
*/
#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
#define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
#define MAX_MMIO_BASES 3
struct engine_info {
u8 class;
u8 instance;
/* mmio bases table *must* be sorted in reverse graphics_ver order */
struct engine_mmio_base {
u32 graphics_ver : 8;
u32 base : 24;
} mmio_bases[MAX_MMIO_BASES];
};
static const struct engine_info intel_engines[] = {
[RCS0] = {
.class = RENDER_CLASS,
.instance = 0,
.mmio_bases = {
{ .graphics_ver = 1, .base = RENDER_RING_BASE }
},
},
[BCS0] = {
.class = COPY_ENGINE_CLASS,
.instance = 0,
.mmio_bases = {
{ .graphics_ver = 6, .base = BLT_RING_BASE }
},
},
[BCS1] = {
.class = COPY_ENGINE_CLASS,
.instance = 1,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
},
},
[BCS2] = {
.class = COPY_ENGINE_CLASS,
.instance = 2,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
},
},
[BCS3] = {
.class = COPY_ENGINE_CLASS,
.instance = 3,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
},
},
[BCS4] = {
.class = COPY_ENGINE_CLASS,
.instance = 4,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
},
},
[BCS5] = {
.class = COPY_ENGINE_CLASS,
.instance = 5,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
},
},
[BCS6] = {
.class = COPY_ENGINE_CLASS,
.instance = 6,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
},
},
[BCS7] = {
.class = COPY_ENGINE_CLASS,
.instance = 7,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
},
},
[BCS8] = {
.class = COPY_ENGINE_CLASS,
.instance = 8,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
},
},
[VCS0] = {
.class = VIDEO_DECODE_CLASS,
.instance = 0,
.mmio_bases = {
{ .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
{ .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
{ .graphics_ver = 4, .base = BSD_RING_BASE }
},
},
[VCS1] = {
.class = VIDEO_DECODE_CLASS,
.instance = 1,
.mmio_bases = {
{ .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
{ .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
},
},
[VCS2] = {
.class = VIDEO_DECODE_CLASS,
.instance = 2,
.mmio_bases = {
{ .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
},
},
[VCS3] = {
.class = VIDEO_DECODE_CLASS,
.instance = 3,
.mmio_bases = {
{ .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
},
},
[VCS4] = {
.class = VIDEO_DECODE_CLASS,
.instance = 4,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
},
},
[VCS5] = {
.class = VIDEO_DECODE_CLASS,
.instance = 5,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
},
},
[VCS6] = {
.class = VIDEO_DECODE_CLASS,
.instance = 6,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
},
},
[VCS7] = {
.class = VIDEO_DECODE_CLASS,
.instance = 7,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
},
},
[VECS0] = {
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
.mmio_bases = {
{ .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
{ .graphics_ver = 7, .base = VEBOX_RING_BASE }
},
},
[VECS1] = {
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
.mmio_bases = {
{ .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
},
},
[VECS2] = {
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 2,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
},
},
[VECS3] = {
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 3,
.mmio_bases = {
{ .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
},
},
[CCS0] = {
.class = COMPUTE_CLASS,
.instance = 0,
.mmio_bases = {
{ .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE }
}
},
[CCS1] = {
.class = COMPUTE_CLASS,
.instance = 1,
.mmio_bases = {
{ .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE }
}
},
[CCS2] = {
.class = COMPUTE_CLASS,
.instance = 2,
.mmio_bases = {
{ .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE }
}
},
[CCS3] = {
.class = COMPUTE_CLASS,
.instance = 3,
.mmio_bases = {
{ .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE }
}
},
[GSC0] = {
.class = OTHER_CLASS,
.instance = OTHER_GSC_INSTANCE,
.mmio_bases = {
{ .graphics_ver = 12, .base = MTL_GSC_RING_BASE }
}
},
};
/**
* intel_engine_context_size() - return the size of the context for an engine
* @gt: the gt
* @class: engine class
*
* Each engine class may require a different amount of space for a context
* image.
*
* Return: size (in bytes) of an engine class specific context image
*
* Note: this size includes the HWSP, which is part of the context image
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
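*
* A minimal usage sketch, mirroring what intel_engine_setup() below does
* with the returned size:
*
*	engine->context_size = intel_engine_context_size(gt, engine->class);
*	if (WARN_ON(engine->context_size > BIT(20)))
*		engine->context_size = 0;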
*/
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
struct intel_uncore *uncore = gt->uncore;
u32 cxt_size;
BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
switch (class) {
case COMPUTE_CLASS:
fallthrough;
case RENDER_CLASS:
switch (GRAPHICS_VER(gt->i915)) {
default:
MISSING_CASE(GRAPHICS_VER(gt->i915));
return DEFAULT_LR_CONTEXT_RENDER_SIZE;
case 12:
case 11:
return GEN11_LR_CONTEXT_RENDER_SIZE;
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
return GEN8_LR_CONTEXT_RENDER_SIZE;
case 7:
if (IS_HASWELL(gt->i915))
return HSW_CXT_TOTAL_SIZE;
cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 6:
cxt_size = intel_uncore_read(uncore, CXT_SIZE);
return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
PAGE_SIZE);
case 5:
case 4:
/*
* There is a discrepancy here between the size reported
* by the register and the size of the context layout
* in the docs. Both are described as authoritative!
*
* The discrepancy is on the order of a few cachelines,
* but the total is under one page (4k), which is our
* minimum allocation anyway so it should all come
* out in the wash.
*/
cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
drm_dbg(>->i915->drm,
"graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
GRAPHICS_VER(gt->i915), cxt_size * 64,
cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
/* For the special day when i810 gets merged. */
case 1:
return 0;
}
break;
default:
MISSING_CASE(class);
fallthrough;
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
case COPY_ENGINE_CLASS:
case OTHER_CLASS:
if (GRAPHICS_VER(gt->i915) < 8)
return 0;
return GEN8_LR_CONTEXT_OTHER_SIZE;
}
}
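/*
* Pick the mmio base matching the running device: the per-engine table is
* sorted by descending graphics_ver, so the first entry that is not newer
* than the device is the one to use.
*/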
static u32 __engine_mmio_base(struct drm_i915_private *i915,
const struct engine_mmio_base *bases)
{
int i;
for (i = 0; i < MAX_MMIO_BASES; i++)
if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
break;
GEM_BUG_ON(i == MAX_MMIO_BASES);
GEM_BUG_ON(!bases[i].base);
return bases[i].base;
}
static void __sprint_engine_name(struct intel_engine_cs *engine)
{
/*
* Before we know what the uABI name for this engine will be,
* we still would like to keep track of this engine in the debug logs.
* We throw in a ' here as a reminder that this isn't its final name.
*/
GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
intel_engine_class_repr(engine->class),
engine->instance) >= sizeof(engine->name));
}
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
/*
* Though they added more rings on g4x/ilk, they did not add
* per-engine HWSTAM until gen6.
*/
if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
return;
if (GRAPHICS_VER(engine->i915) >= 3)
ENGINE_WRITE(engine, RING_HWSTAM, mask);
else
ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
/* Mask off all writes into the unknown HWSP */
intel_engine_set_hwsp_writemask(engine, ~0u);
}
static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
GEM_DEBUG_WARN_ON(iir);
}
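/*
* Look up the reset domain for an engine. On graphics version 11 and later
* every engine carries its own domain bit, except for the compute engines
* which share the render domain; older parts only expose a handful of
* domains.
*/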
static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
{
u32 reset_domain;
if (ver >= 11) {
static const u32 engine_reset_domains[] = {
[RCS0] = GEN11_GRDOM_RENDER,
[BCS0] = GEN11_GRDOM_BLT,
[BCS1] = XEHPC_GRDOM_BLT1,
[BCS2] = XEHPC_GRDOM_BLT2,
[BCS3] = XEHPC_GRDOM_BLT3,
[BCS4] = XEHPC_GRDOM_BLT4,
[BCS5] = XEHPC_GRDOM_BLT5,
[BCS6] = XEHPC_GRDOM_BLT6,
[BCS7] = XEHPC_GRDOM_BLT7,
[BCS8] = XEHPC_GRDOM_BLT8,
[VCS0] = GEN11_GRDOM_MEDIA,
[VCS1] = GEN11_GRDOM_MEDIA2,
[VCS2] = GEN11_GRDOM_MEDIA3,
[VCS3] = GEN11_GRDOM_MEDIA4,
[VCS4] = GEN11_GRDOM_MEDIA5,
[VCS5] = GEN11_GRDOM_MEDIA6,
[VCS6] = GEN11_GRDOM_MEDIA7,
[VCS7] = GEN11_GRDOM_MEDIA8,
[VECS0] = GEN11_GRDOM_VECS,
[VECS1] = GEN11_GRDOM_VECS2,
[VECS2] = GEN11_GRDOM_VECS3,
[VECS3] = GEN11_GRDOM_VECS4,
[CCS0] = GEN11_GRDOM_RENDER,
[CCS1] = GEN11_GRDOM_RENDER,
[CCS2] = GEN11_GRDOM_RENDER,
[CCS3] = GEN11_GRDOM_RENDER,
[GSC0] = GEN12_GRDOM_GSC,
};
GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
!engine_reset_domains[id]);
reset_domain = engine_reset_domains[id];
} else {
static const u32 engine_reset_domains[] = {
[RCS0] = GEN6_GRDOM_RENDER,
[BCS0] = GEN6_GRDOM_BLT,
[VCS0] = GEN6_GRDOM_MEDIA,
[VCS1] = GEN8_GRDOM_MEDIA2,
[VECS0] = GEN6_GRDOM_VECS,
};
GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
!engine_reset_domains[id]);
reset_domain = engine_reset_domains[id];
}
return reset_domain;
}
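/*
* Allocate an intel_engine_cs for @id and fill in the static description
* (class, instance, mmio base, reset domain) along with the default
* scheduling properties, clamped to any system limits, before publishing
* the engine in the gt tables.
*/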
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
u8 logical_instance)
{
const struct engine_info *info = &intel_engines[id];
struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
u8 guc_class;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));
if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
return -EINVAL;
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
return -ENOMEM;
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
INIT_LIST_HEAD(&engine->pinned_contexts_list);
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915),
id);
engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
guc_class = engine_class_to_guc_class(info->class);
engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->irq_handler = nop_irq_handler;
engine->class = info->class;
engine->instance = info->instance;
engine->logical_mask = BIT(logical_instance);
__sprint_engine_name(engine);
if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
__ffs(CCS_MASK(engine->gt)) == engine->instance) ||
engine->class == RENDER_CLASS)
engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
/* features common between engines sharing EUs */
if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
}
engine->props.heartbeat_interval_ms =
CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
engine->props.max_busywait_duration_ns =
CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
engine->props.preempt_timeout_ms =
CONFIG_DRM_I915_PREEMPT_TIMEOUT;
engine->props.stop_timeout_ms =
CONFIG_DRM_I915_STOP_TIMEOUT;
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
/*
* Mid-thread pre-emption is not available in Gen12. Unfortunately,
* some compute workloads run quite long threads. That means they get
* reset due to not pre-empting in a timely manner. So, bump the
* pre-emption timeout value to be much higher for compute engines.
*/
if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE;
/* Cap properties according to any system limits */
#define CLAMP_PROP(field) \
do { \
u64 clamp = intel_clamp_##field(engine, engine->props.field); \
if (clamp != engine->props.field) { \
drm_notice(&engine->i915->drm, \
"Warning, clamping %s to %lld to prevent overflow\n", \
#field, clamp); \
engine->props.field = clamp; \
} \
} while (0)
CLAMP_PROP(heartbeat_interval_ms);
CLAMP_PROP(max_busywait_duration_ns);
CLAMP_PROP(preempt_timeout_ms);
CLAMP_PROP(stop_timeout_ms);
CLAMP_PROP(timeslice_duration_ms);
#undef CLAMP_PROP
engine->defaults = engine->props; /* never to change again */
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
DRIVER_CAPS(i915)->has_logical_contexts = true;
ewma__engine_latency_init(&engine->latency);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
/* Scrub mmio state on takeover */
intel_engine_sanitize_mmio(engine);
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
return 0;
}
u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
{
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
return value;
}
u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
{
value = min(value, jiffies_to_nsecs(2));
return value;
}
u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
{
/*
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
return value;
}
u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
{
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
return value;
}
u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
{
/*
* NB: The GuC API only supports 32bit values. However, the limit is further
* reduced due to internal calculations which would otherwise overflow.
*/
if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
value = min_t(u64, value, guc_policy_max_exec_quantum_ms());
value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));
return value;
}
static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
if (engine->class == VIDEO_DECODE_CLASS) {
/*
* HEVC support is present on the first engine instance
* before Gen11 and on all instances afterwards.
*/
if (GRAPHICS_VER(i915) >= 11 ||
(GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_CLASS_CAPABILITY_HEVC;
/*
* SFC block is present only on even logical engine
* instances.
*/
if ((GRAPHICS_VER(i915) >= 11 &&
(engine->gt->info.vdbox_sfc_access &
BIT(engine->instance))) ||
(GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
if (GRAPHICS_VER(i915) >= 9 &&
engine->gt->info.sfc_mask & BIT(engine->instance))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
}
}
static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id)
__setup_engine_capabilities(engine);
}
/**
* intel_engines_release() - free the resources allocated for Command Streamers
* @gt: pointer to struct intel_gt
*/
void intel_engines_release(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* Before we release the resources held by engine, we must be certain
* that the HW is no longer accessing them -- having the GPU scribble
* to or read from a page being used for something else causes no end
* of fun.
*
* The GPU should be reset by this point, but assume the worst just
* in case we aborted before completely initialising the engines.
*/
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
__intel_gt_reset(gt, ALL_ENGINES);
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
if (!engine->release)
continue;
intel_wakeref_wait_for_idle(&engine->wakeref);
GEM_BUG_ON(intel_engine_pm_is_awake(engine));
engine->release(engine);
engine->release = NULL;
memset(&engine->reset, 0, sizeof(engine->reset));
}
}
void intel_engine_free_request_pool(struct intel_engine_cs *engine)
{
if (!engine->request_pool)
return;
kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
}
void intel_engines_free(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Free the requests! dma-resv keeps fences around for an eternity */
rcu_barrier();
for_each_engine(engine, gt, id) {
intel_engine_free_request_pool(engine);
kfree(engine);
gt->engine[id] = NULL;
}
}
static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
unsigned int physical_vdbox,
unsigned int logical_vdbox, u16 vdbox_mask)
{
struct drm_i915_private *i915 = gt->i915;
/*
* In Gen11, only even numbered logical VDBOXes are hooked
* up to an SFC (Scaler & Format Converter) unit.
* In Gen12, even numbered physical instances are always connected
* to an SFC. Odd numbered physical instances have an SFC only if
* the previous even instance is fused off.
*
* Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
* in the fuse register that tells us whether a specific SFC is present.
*/
if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
return false;
else if (MEDIA_VER(i915) >= 12)
return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask);
else if (MEDIA_VER(i915) == 11)
return logical_vdbox % 2 == 0;
return false;
}
static void engine_mask_apply_media_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
unsigned int logical_vdbox = 0;
unsigned int i;
u32 media_fuse, fuse1;
u16 vdbox_mask;
u16 vebox_mask;
if (MEDIA_VER(gt->i915) < 11)
return;
/*
* On newer platforms the fusing register is called 'enable' and has
* enable semantics, while on older platforms it is called 'disable'
* and bits have disable semantics.
*/
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
media_fuse = ~media_fuse;
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
} else {
gt->info.sfc_mask = ~0;
}
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(gt, _VCS(i))) {
vdbox_mask &= ~BIT(i);
continue;
}
if (!(BIT(i) & vdbox_mask)) {
gt->info.engine_mask &= ~BIT(_VCS(i));
drm_dbg(&i915->drm, "vcs%u fused off\n", i);
continue;
}
if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
gt->info.vdbox_sfc_access |= BIT(i);
logical_vdbox++;
}
drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
vdbox_mask, VDBOX_MASK(gt));
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(gt, _VECS(i))) {
vebox_mask &= ~BIT(i);
continue;
}
if (!(BIT(i) & vebox_mask)) {
gt->info.engine_mask &= ~BIT(_VECS(i));
drm_dbg(&i915->drm, "vecs%u fused off\n", i);
}
}
drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
vebox_mask, VEBOX_MASK(gt));
GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
}
static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_gt_info *info = &gt->info;
int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
unsigned long ccs_mask;
unsigned int i;
if (GRAPHICS_VER(i915) < 11)
return;
if (hweight32(CCS_MASK(gt)) <= 1)
return;
ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
ss_per_ccs);
/*
* If all DSS in a quadrant are fused off, the corresponding CCS
* engine is not available for use.
*/
for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
info->engine_mask &= ~BIT(_CCS(i));
drm_dbg(&i915->drm, "ccs%u fused off\n", i);
}
}
static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_gt_info *info = &gt->info;
unsigned long meml3_mask;
unsigned long quad;
if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
return;
meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
/*
* Link Copy engines may be fused off according to meml3_mask. Each
* bit is a quad that houses two Link Copy and two Sub Copy engines.
*/
for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
unsigned int instance = quad * 2 + 1;
intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
_BCS(instance));
if (mask & info->engine_mask) {
drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);
info->engine_mask &= ~mask;
}
}
}
/*
* Determine which engines are fused off in our particular hardware.
* Note that we have a catch-22 situation where we need to be able to access
* the blitter forcewake domain to read the engine fuses, but at the same time
* we need to know which engines are available on the system to know which
* forcewake domains are present. We solve this by initializing the forcewake
* domains based on the full engine mask in the platform capabilities before
* calling this function and pruning the domains for fused-off engines
* afterwards.
*/
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
struct intel_gt_info *info = &gt->info;
GEM_BUG_ON(!info->engine_mask);
engine_mask_apply_media_fuses(gt);
engine_mask_apply_compute_fuses(gt);
engine_mask_apply_copy_fuses(gt);
/*
* The only use of the GSC CS is to load and communicate with the GSC
* FW, so we have no use for it if we don't have the FW.
*
* IMPORTANT: in cases where we don't have the GSC FW, we have a
* catch-22 situation that breaks media C6 due to 2 requirements:
* 1) once turned on, the GSC power well will not go to sleep unless the
* GSC FW is loaded.
* 2) to enable idling (which is required for media C6) we need to
* initialize the IDLE_MSG register for the GSC CS and do at least 1
* submission, which will wake up the GSC power well.
*/
if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) {
drm_notice(&gt->i915->drm,
"No GSC FW selected, disabling GSC CS and media C6\n");
info->engine_mask &= ~BIT(GSC0);
}
return info->engine_mask;
}
static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
u8 class, const u8 *map, u8 num_instances)
{
int i, j;
u8 current_logical_id = 0;
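/*
 * Hand out consecutive logical ids to the engines of @class that are
 * present, in the order given by map[].
 */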
for (j = 0; j < num_instances; ++j) {
for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
if (!HAS_ENGINE(gt, i) ||
intel_engines[i].class != class)
continue;
if (intel_engines[i].instance == map[j]) {
logical_ids[intel_engines[i].instance] =
current_logical_id++;
break;
}
}
}
}
static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
/*
* Logical to physical mapping is needed for proper support
* of the split-frame feature.
*/
if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
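/* Even numbered physical instances take the low logical ids, followed by the odd ones. */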
const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
populate_logical_ids(gt, logical_ids, class,
map, ARRAY_SIZE(map));
} else {
int i;
u8 map[MAX_ENGINE_INSTANCE + 1];
for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
map[i] = i;
populate_logical_ids(gt, logical_ids, class,
map, ARRAY_SIZE(map));
}
}
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @gt: pointer to struct intel_gt
*
* Return: non-zero if the initialization failed.
*/
int intel_engines_init_mmio(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
const unsigned int engine_mask = init_engine_mask(gt);
unsigned int mask = 0;
unsigned int i, class;
u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
int err;
drm_WARN_ON(&i915->drm, engine_mask == 0);
drm_WARN_ON(&i915->drm, engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_probe_failure(i915))
return -ENODEV;
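/* Set up the engines class by class so that logical ids are assigned within each engine class. */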
for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
setup_logical_ids(gt, logical_ids, class);
for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
u8 instance = intel_engines[i].instance;
if (intel_engines[i].class != class ||
!HAS_ENGINE(gt, i))
continue;
err = intel_engine_setup(gt, i,
logical_ids[instance]);
if (err)
goto cleanup;
mask |= BIT(i);
}
}
/*
* Catch failures to update intel_engines table when the new engines
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
if (drm_WARN_ON(&i915->drm, mask != engine_mask))
gt->info.engine_mask = mask;
gt->info.num_engines = hweight32(mask);
intel_gt_check_and_clear_faults(gt);
intel_setup_engine_capabilities(gt);
intel_uncore_prune_engine_fw_domains(gt->uncore, gt);
return 0;
cleanup:
intel_engines_free(gt);
return err;
}
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
execlists->port_mask = 1;
GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
memset(execlists->pending, 0, sizeof(execlists->pending));
execlists->active =
memset(execlists->inflight, 0, sizeof(execlists->inflight));
}
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
/* Prevent writes into HWSP after returning the page to the system */
intel_engine_set_hwsp_writemask(engine, ~0u);
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
if (!HWS_NEEDS_PHYSICAL(engine->i915))
i915_vma_unpin(vma);
i915_gem_object_unpin_map(vma->obj);
i915_gem_object_put(vma->obj);
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
struct i915_gem_ww_ctx *ww,
struct i915_vma *vma)
{
unsigned int flags;
if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
* gen4, gen5, or byt, they also behave similarly
* and hang if the HWS is placed at the top of the
* GTT. To generalise, it appears that all !llc
* platforms have issues with us placing the HWS
* above the mappable region (even though we never
* actually map it).
*/
flags = PIN_MAPPABLE;
else
flags = PIN_HIGH;
return i915_ggtt_pin(vma, ww, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
void *vaddr;
int ret;
INIT_LIST_HEAD(&engine->status_page.timelines);
/*
* Though the HWS register does support 36bit addresses, historically
* we have had hangs and corruption reported due to wild writes if
* the HWS is placed above 4G. We only allow objects to be allocated
* in GFP_DMA32 for i965, and no earlier physical address users had
* access to more than 4G.
*/
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
drm_err(&engine->i915->drm,
"Failed to allocate status page\n");
return PTR_ERR(obj);
}
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_put;
}
i915_gem_ww_ctx_init(&ww, true);
retry:
ret = i915_gem_object_lock(obj, &ww);
if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
ret = pin_ggtt_status_page(engine, &ww, vma);
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto err_unpin;
}
engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
engine->status_page.vma = vma;
err_unpin:
if (ret)
i915_vma_unpin(vma);
err:
if (ret == -EDEADLK) {
ret = i915_gem_ww_ctx_backoff(&ww);
if (!ret)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
err_put:
if (ret)
i915_gem_object_put(obj);
return ret;
}
static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
{
static const union intel_engine_tlb_inv_reg gen8_regs[] = {
[RENDER_CLASS].reg = GEN8_RTCR,
[VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */
[VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR,
[COPY_ENGINE_CLASS].reg = GEN8_BTCR,
};
static const union intel_engine_tlb_inv_reg gen12_regs[] = {
[RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR,
[VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
[VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
[COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR,
[COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR,
};
static const union intel_engine_tlb_inv_reg xehp_regs[] = {
[RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR,
[VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR,
[VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR,
[COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR,
[COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR,
};
static const union intel_engine_tlb_inv_reg xelpmp_regs[] = {
[VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR,
[VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR,
[OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR,
};
struct drm_i915_private *i915 = engine->i915;
const unsigned int instance = engine->instance;
const unsigned int class = engine->class;
const union intel_engine_tlb_inv_reg *regs;
union intel_engine_tlb_inv_reg reg;
unsigned int num = 0;
u32 val;
/*
* New platforms should not be added with catch-all-newer (>=)
* condition so that any later platform added triggers the below warning
* and in turn mandates a human cross-check of whether the invalidation
* flows have compatible semantics.
*
* For instance with the 11.00 -> 12.00 transition three out of five
* respective engine registers were moved to masked type. Then after the
* 12.00 -> 12.50 transition multicast handling is required too.
*/
if (engine->gt->type == GT_MEDIA) {
if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
regs = xelpmp_regs;
num = ARRAY_SIZE(xelpmp_regs);
}
} else {
if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
regs = xehp_regs;
num = ARRAY_SIZE(xehp_regs);
} else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
regs = gen8_regs;
num = ARRAY_SIZE(gen8_regs);
} else if (GRAPHICS_VER(i915) < 8) {
return 0;
}
}
if (gt_WARN_ONCE(engine->gt, !num,
"Platform does not implement TLB invalidation!"))
return -ENODEV;
if (gt_WARN_ON_ONCE(engine->gt,
class >= num ||
(!regs[class].reg.reg &&
!regs[class].mcr_reg.reg)))
return -ERANGE;
reg = regs[class];
if (regs == xelpmp_regs && class == OTHER_CLASS) {
/*
* There's only a single GSC instance, but it uses register bit
* 1 instead of either 0 or OTHER_GSC_INSTANCE.
*/
GEM_WARN_ON(instance != OTHER_GSC_INSTANCE);
val = 1;
} else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) {
reg.reg = GEN8_M2TCR;
val = 0;
} else {
val = instance;
}
val = BIT(val);
engine->tlb_inv.mcr = regs == xehp_regs;
engine->tlb_inv.reg = reg;
engine->tlb_inv.done = val;
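/*
 * On Gen12 and later, the video, enhancement, compute and GSC
 * invalidation registers are masked, so the request must also set the
 * corresponding mask bit in the upper 16 bits (_MASKED_BIT_ENABLE).
 */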
if (GRAPHICS_VER(i915) >= 12 &&
(engine->class == VIDEO_DECODE_CLASS ||
engine->class == VIDEO_ENHANCEMENT_CLASS ||
engine->class == COMPUTE_CLASS ||
engine->class == OTHER_CLASS))
engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
else
engine->tlb_inv.request = val;
return 0;
}
static int engine_setup_common(struct intel_engine_cs *engine)
{
int err;
init_llist_head(&engine->barrier_tasks);
err = intel_engine_init_tlb_invalidation(engine);
if (err)
return err;
err = init_status_page(engine);
if (err)
return err;
engine->breadcrumbs = intel_breadcrumbs_create(engine);
if (!engine->breadcrumbs) {
err = -ENOMEM;
goto err_status;
}
engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
if (!engine->sched_engine) {
err = -ENOMEM;
goto err_sched_engine;
}
engine->sched_engine->private_data = engine;
err = intel_engine_init_cmd_parser(engine);
if (err)
goto err_cmd_parser;
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
/* Use the whole device by default */
engine->sseu =
intel_sseu_from_device_info(&engine->gt->info.sseu);
intel_engine_init_workarounds(engine);
intel_engine_init_whitelist(engine);
intel_engine_init_ctx_wa(engine);
if (GRAPHICS_VER(engine->i915) >= 12)
engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
return 0;
err_cmd_parser:
i915_sched_engine_put(engine->sched_engine);
err_sched_engine:
intel_breadcrumbs_put(engine->breadcrumbs);
err_status:
cleanup_status_page(engine);
return err;
}
struct measure_breadcrumb {
struct i915_request rq;
struct intel_ring ring;
u32 cs[2048];
};
static int measure_breadcrumb_dw(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
struct measure_breadcrumb *frame;
int dw;
GEM_BUG_ON(!engine->gt->scratch);
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame)
return -ENOMEM;
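/*
 * Assemble a throwaway request and ring on the heap so that we can
 * count how many dwords emit_fini_breadcrumb() writes without
 * touching a real ring or submitting anything to the hardware.
 */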
frame->rq.i915 = engine->i915;
frame->rq.engine = engine;
frame->rq.context = ce;
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.wrap =
BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
frame->ring.effective_size = frame->ring.size;
intel_ring_update_space(&frame->ring);
frame->rq.ring = &frame->ring;
mutex_lock(&ce->timeline->mutex);
spin_lock_irq(&engine->sched_engine->lock);
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
spin_unlock_irq(&engine->sched_engine->lock);
mutex_unlock(&ce->timeline->mutex);
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
kfree(frame);
return dw;
}
struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
struct i915_address_space *vm,
unsigned int ring_size,
unsigned int hwsp,
struct lock_class_key *key,
const char *name)
{
struct intel_context *ce;
int err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return ce;
__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
ce->timeline = page_pack_bits(NULL, hwsp);
ce->ring = NULL;
ce->ring_size = ring_size;
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
err = intel_context_pin(ce); /* perma-pin so it is always available */
if (err) {
intel_context_put(ce);
return ERR_PTR(err);
}
list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
/*
* Give our perma-pinned kernel timelines a separate lockdep class,
* so that we can use them from within the normal user timelines
* should we need to inject GPU operations during their request
* construction.
*/
lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
return ce;
}
void intel_engine_destroy_pinned_context(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
struct i915_vma *hwsp = engine->status_page.vma;
GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
mutex_lock(&hwsp->vm->mutex);
list_del(&ce->timeline->engine_link);
mutex_unlock(&hwsp->vm->mutex);
list_del(&ce->pinned_contexts_link);
intel_context_unpin(ce);
intel_context_put(ce);
}
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
static struct lock_class_key kernel;
return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
I915_GEM_HWS_SEQNO_ADDR,
&kernel, "kernel_context");
}
/*
* engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
*
* Initializes @engine structure members shared between legacy and execlists
* submission modes which do require hardware access.
*
* Typically done at later stages of submission mode specific engine setup.
*
* Returns zero on success or an error code on failure.
*/
static int engine_init_common(struct intel_engine_cs *engine)
{
struct intel_context *ce;
int ret;
engine->set_default_submission(engine);
/*
* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
* default context also requires GTT space which may not
* be available. To avoid this we always pin the default
* context.
*/
ce = create_kernel_context(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
ret = measure_breadcrumb_dw(ce);
if (ret < 0)
goto err_context;
engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
return 0;
err_context:
intel_engine_destroy_pinned_context(ce);
return ret;
}
int intel_engines_init(struct intel_gt *gt)
{
int (*setup)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
if (intel_uc_uses_guc_submission(&gt->uc)) {
gt->submission_method = INTEL_SUBMISSION_GUC;
setup = intel_guc_submission_setup;
} else if (HAS_EXECLISTS(gt->i915)) {
gt->submission_method = INTEL_SUBMISSION_ELSP;
setup = intel_execlists_submission_setup;
} else {
gt->submission_method = INTEL_SUBMISSION_RING;
setup = intel_ring_submission_setup;
}
for_each_engine(engine, gt, id) {
err = engine_setup_common(engine);
if (err)
return err;
err = setup(engine);
if (err) {
intel_engine_cleanup_common(engine);
return err;
}
/* The backend should now be responsible for cleanup */
GEM_BUG_ON(engine->release == NULL);
err = engine_init_common(engine);
if (err)
return err;
intel_engine_add_user(engine);
}
return 0;
}
/**
* intel_engine_cleanup_common - cleans up the engine state created by
* the common initializers.
* @engine: Engine to cleanup.
*
* This cleans up everything created by the common helpers.
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
i915_sched_engine_put(engine->sched_engine);
intel_breadcrumbs_put(engine->breadcrumbs);
intel_engine_fini_retire(engine);
intel_engine_cleanup_cmd_parser(engine);
if (engine->default_state)
fput(engine->default_state);
if (engine->kernel_context)
intel_engine_destroy_pinned_context(engine->kernel_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
cleanup_status_page(engine);
intel_wa_list_free(&engine->ctx_wa_list);
intel_wa_list_free(&engine->wa_list);
intel_wa_list_free(&engine->whitelist);
}
/**
* intel_engine_resume - re-initializes the HW state of the engine
* @engine: Engine to resume.
*
* Returns zero on success or an error code on failure.
*/
int intel_engine_resume(struct intel_engine_cs *engine)
{
intel_engine_apply_workarounds(engine);
intel_engine_apply_whitelist(engine);
return engine->resume(engine);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
u64 acthd;
if (GRAPHICS_VER(i915) >= 8)
acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
else if (GRAPHICS_VER(i915) >= 4)
acthd = ENGINE_READ(engine, RING_ACTHD);
else
acthd = ENGINE_READ(engine, ACTHD);
return acthd;
}
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
u64 bbaddr;
if (GRAPHICS_VER(engine->i915) >= 8)
bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
else
bbaddr = ENGINE_READ(engine, RING_BBADDR);
return bbaddr;
}
static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{
if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
return 0;
/*
* If we are doing a normal GPU reset, we can take our time and allow
* the engine to quiesce. We've stopped submission to the engine, and
* if we wait long enough an innocent context should complete and
* leave the engine idle. So they should not be caught unaware by
* the forthcoming GPU reset (which usually follows the stop_cs)!
*/
return READ_ONCE(engine->props.stop_timeout_ms);
}
static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
int fast_timeout_us,
int slow_timeout_ms)
{
struct intel_uncore *uncore = engine->uncore;
const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
/*
* Wa_22011802037: Prior to doing a reset, ensure CS is
* stopped, set ring stop bit and prefetch disable bit to halt CS
*/
if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
(GRAPHICS_VER(engine->i915) >= 11 &&
GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
_MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
err = __intel_wait_for_register_fw(engine->uncore, mode,
MODE_IDLE, MODE_IDLE,
fast_timeout_us,
slow_timeout_ms,
NULL);
/* A final mmio read to let GPU writes be hopefully flushed to memory */
intel_uncore_posting_read_fw(uncore, mode);
return err;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
int err = 0;
if (GRAPHICS_VER(engine->i915) < 3)
return -ENODEV;
ENGINE_TRACE(engine, "\n");
/*
* TODO: Find out why occasionally stopping the CS times out. Seen
* especially with gem_eio tests.
*
* Occasionally trying to stop the cs times out, but does not adversely
* affect functionality. The timeout is set as a config parameter that
* defaults to 100ms. In most cases the follow up operation is to wait
* for pending MI_FORCE_WAKES. The assumption is that this timeout is
* sufficient for any pending MI_FORCEWAKEs to complete. Until the
* timeout is root-caused, the caller must check and handle the return
* from this function.
*/
if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
ENGINE_TRACE(engine,
"timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
/*
* Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
(ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
err = -ETIMEDOUT;
}
return err;
}
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
ENGINE_TRACE(engine, "\n");
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
{
static const i915_reg_t _reg[I915_NUM_ENGINES] = {
[RCS0] = MSG_IDLE_CS,
[BCS0] = MSG_IDLE_BCS,
[VCS0] = MSG_IDLE_VCS0,
[VCS1] = MSG_IDLE_VCS1,
[VCS2] = MSG_IDLE_VCS2,
[VCS3] = MSG_IDLE_VCS3,
[VCS4] = MSG_IDLE_VCS4,
[VCS5] = MSG_IDLE_VCS5,
[VCS6] = MSG_IDLE_VCS6,
[VCS7] = MSG_IDLE_VCS7,
[VECS0] = MSG_IDLE_VECS0,
[VECS1] = MSG_IDLE_VECS1,
[VECS2] = MSG_IDLE_VECS2,
[VECS3] = MSG_IDLE_VECS3,
[CCS0] = MSG_IDLE_CS,
[CCS1] = MSG_IDLE_CS,
[CCS2] = MSG_IDLE_CS,
[CCS3] = MSG_IDLE_CS,
};
u32 val;
if (!_reg[engine->id].reg)
return 0;
val = intel_uncore_read(engine->uncore, _reg[engine->id]);
/* bits[29:25] & bits[13:9] >> shift */
return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
}
static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
{
int ret;
/* Ensure GPM receives fw up/down after CS is stopped */
udelay(1);
/* Wait for forcewake request to complete in GPM */
ret = __intel_wait_for_register_fw(gt->uncore,
GEN9_PWRGT_DOMAIN_STATUS,
fw_mask, fw_mask, 5000, 0, NULL);
/* Ensure CS receives fw ack from GPM */
udelay(1);
if (ret)
GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
}
/*
* Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
* pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
* pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
* MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
* are concerned only with the gt reset here, we use a logical OR of pending
* forcewakeups from all reset domains and then wait for them to complete by
* querying PWRGT_DOMAIN_STATUS.
*/
void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
{
u32 fw_pending = __cs_pending_mi_force_wakes(engine);
if (fw_pending)
__gpm_wait_for_fw_complete(engine->gt, fw_pending);
}
/* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
int iter;
memset(instdone, 0, sizeof(*instdone));
if (GRAPHICS_VER(i915) >= 8) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
return;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
if (GRAPHICS_VER(i915) >= 12) {
instdone->slice_common_extra[0] =
intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
instdone->slice_common_extra[1] =
intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
}
for_each_ss_steering(iter, engine->gt, slice, subslice) {
instdone->sampler[slice][subslice] =
intel_gt_mcr_read(engine->gt,
GEN8_SAMPLER_INSTDONE,
slice, subslice);
instdone->row[slice][subslice] =
intel_gt_mcr_read(engine->gt,
GEN8_ROW_INSTDONE,
slice, subslice);
}
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
for_each_ss_steering(iter, engine->gt, slice, subslice)
instdone->geom_svg[slice][subslice] =
intel_gt_mcr_read(engine->gt,
XEHPG_INSTDONE_GEOM_SVG,
slice, subslice);
}
} else if (GRAPHICS_VER(i915) >= 7) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
return;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
instdone->sampler[0][0] =
intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
instdone->row[0][0] =
intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
} else if (GRAPHICS_VER(i915) >= 4) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
instdone->slice_common =
intel_uncore_read(uncore, GEN4_INSTDONE1);
} else {
instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
}
}
static bool ring_is_idle(struct intel_engine_cs *engine)
{
bool idle = true;
if (I915_SELFTEST_ONLY(!engine->mmio_base))
return true;
if (!intel_engine_pm_get_if_awake(engine))
return true;
/* First check that no commands are left in the ring */
if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
(ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
if (GRAPHICS_VER(engine->i915) > 2 &&
!(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
intel_engine_pm_put(engine);
return idle;
}
void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
struct tasklet_struct *t = &engine->sched_engine->tasklet;
if (!t->callback)
return;
local_bh_disable();
if (tasklet_trylock(t)) {
/* Must wait for any GPU reset in progress. */
if (__tasklet_is_enabled(t))
t->callback(t);
tasklet_unlock(t);
}
local_bh_enable();
/* Synchronise and wait for the tasklet on another CPU */
if (sync)
tasklet_unlock_wait(t);
}
/**
* intel_engine_is_idle() - Report if the engine has finished processing all work
* @engine: the intel_engine_cs
*
* Return true if there are no requests pending, nothing left to be submitted
* to hardware, and that the engine is idle.
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
/* More white lies, if wedged, hw state is inconsistent */
if (intel_gt_is_wedged(engine->gt))
return true;
if (!intel_engine_pm_is_awake(engine))
return true;
/* Waiting to drain ELSP? */
intel_synchronize_hardirq(engine->i915);
intel_engine_flush_submission(engine);
/* ELSP is empty, but there are ready requests? E.g. after reset */
if (!i915_sched_engine_is_empty(engine->sched_engine))
return false;
/* Ring stopped? */
return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
if (intel_gt_is_wedged(gt))
return true;
/* Already parked (and passed an idleness test); must still be idle */
if (!READ_ONCE(gt->awake))
return true;
for_each_engine(engine, gt, id) {
if (!intel_engine_is_idle(engine))
return false;
}
return true;
}
bool intel_engine_irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
return false;
/* Caller disables interrupts */
spin_lock(engine->gt->irq_lock);
engine->irq_enable(engine);
spin_unlock(engine->gt->irq_lock);
return true;
}
void intel_engine_irq_disable(struct intel_engine_cs *engine)
{
if (!engine->irq_disable)
return;
/* Caller disables interrupts */
spin_lock(engine->gt->irq_lock);
engine->irq_disable(engine);
spin_unlock(engine->gt->irq_lock);
}
void intel_engines_reset_default_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
if (engine->sanitize)
engine->sanitize(engine);
engine->set_default_submission(engine);
}
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (GRAPHICS_VER(engine->i915)) {
case 2:
return false; /* uses physical not virtual addresses */
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
case 4:
return !IS_I965G(engine->i915); /* who knows! */
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
return true;
}
}
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
struct intel_timeline *tl;
/*
* Even though we are holding the engine->sched_engine->lock here, there
* is no control over the submission queue per-se and we are
* inspecting the active state at a random point in time, with an
* unknown queue. Play safe and make sure the timeline remains valid.
* (Only being used for pretty printing, one extra kref shouldn't
* cause a camel stampede!)
*/
rcu_read_lock();
tl = rcu_dereference(rq->timeline);
if (!kref_get_unless_zero(&tl->kref))
tl = NULL;
rcu_read_unlock();
return tl;
}
static int print_ring(char *buf, int sz, struct i915_request *rq)
{
int len = 0;
if (!i915_request_signaled(rq)) {
struct intel_timeline *tl = get_timeline(rq);
len = scnprintf(buf, sz,
"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
i915_ggtt_offset(rq->ring->vma),
tl ? tl->hwsp_offset : 0,
hwsp_seqno(rq),
DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
1000 * 1000));
if (tl)
intel_timeline_put(tl);
}
return len;
}
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
const size_t rowsize = 8 * sizeof(u32);
const void *prev = NULL;
bool skip = false;
size_t pos;
for (pos = 0; pos < len; pos += rowsize) {
char line[128];
if (prev && !memcmp(prev, buf + pos, rowsize)) {
if (!skip) {
drm_printf(m, "*\n");
skip = true;
}
continue;
}
WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
rowsize, sizeof(u32),
line, sizeof(line),
false) >= sizeof(line));
drm_printf(m, "[%04zx] %s\n", pos, line);
prev = buf + pos;
skip = false;
}
}
static const char *repr_timer(const struct timer_list *t)
{
if (!READ_ONCE(t->expires))
return "inactive";
if (timer_pending(t))
return "active";
return "expired";
}
static void intel_engine_print_registers(struct intel_engine_cs *engine,
struct drm_printer *m)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
u64 addr;
if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(i915, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
if (HAS_EXECLISTS(i915)) {
drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
}
drm_printf(m, "\tRING_START: 0x%08x\n",
ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
drm_printf(m, "\tRING_TAIL: 0x%08x\n",
ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
if (GRAPHICS_VER(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
ENGINE_READ(engine, RING_MI_MODE),
ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
if (GRAPHICS_VER(i915) >= 6) {
drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
drm_printf(m, "\tRING_ESR: 0x%08x\n",
ENGINE_READ(engine, RING_ESR));
drm_printf(m, "\tRING_EMR: 0x%08x\n",
ENGINE_READ(engine, RING_EMR));
drm_printf(m, "\tRING_EIR: 0x%08x\n",
ENGINE_READ(engine, RING_EIR));
}
addr = intel_engine_get_active_head(engine);
drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (GRAPHICS_VER(i915) >= 8)
addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
else if (GRAPHICS_VER(i915) >= 4)
addr = ENGINE_READ(engine, RING_DMA_FADD);
else
addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (GRAPHICS_VER(i915) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
ENGINE_READ(engine, RING_IPEHR));
} else {
drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
const u8 num_entries = execlists->csb_size;
unsigned int idx;
u8 read, write;
drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)),
str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)),
repr_timer(&engine->execlists.preempt),
repr_timer(&engine->execlists.timer));
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
read, write, num_entries);
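/*
 * Normalise the CSB pointers: wrap both indices into range and, if
 * the write pointer trails the read pointer, unwrap it so the loop
 * below prints each outstanding entry exactly once.
 */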
if (read >= num_entries)
read = 0;
if (write >= num_entries)
write = 0;
if (read > write)
write += num_entries;
while (read < write) {
idx = ++read % num_entries;
drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
idx, hws[idx * 2], hws[idx * 2 + 1]);
}
i915_sched_engine_active_lock_bh(engine->sched_engine);
rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
char hdr[160];
int len;
len = scnprintf(hdr, sizeof(hdr),
"\t\tActive[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->active),
rq->context->lrc.ccid,
intel_context_is_closed(rq->context) ? "!" : "",
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
i915_request_show(m, rq, hdr, 0);
}
for (port = execlists->pending; (rq = *port); port++) {
char hdr[160];
int len;
len = scnprintf(hdr, sizeof(hdr),
"\t\tPending[%d]: ccid:%08x%s%s, ",
(int)(port - execlists->pending),
rq->context->lrc.ccid,
intel_context_is_closed(rq->context) ? "!" : "",
intel_context_is_banned(rq->context) ? "*" : "");
len += print_ring(hdr + len, sizeof(hdr) - len, rq);
scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
i915_request_show(m, rq, hdr, 0);
}
rcu_read_unlock();
i915_sched_engine_active_unlock_bh(engine->sched_engine);
} else if (GRAPHICS_VER(i915) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_DCLV));
}
}
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
struct i915_vma_resource *vma_res = rq->batch_res;
void *ring;
int size;
drm_printf(m,
"[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
rq->head, rq->postfix, rq->tail,
vma_res ? upper_32_bits(vma_res->start) : ~0u,
vma_res ? lower_32_bits(vma_res->start) : ~0u);
size = rq->tail - rq->head;
if (rq->tail < rq->head)
size += rq->ring->size;
ring = kmalloc(size, GFP_ATOMIC);
if (ring) {
const void *vaddr = rq->ring->vaddr;
unsigned int head = rq->head;
unsigned int len = 0;
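/*
 * The request may wrap the end of the ring: copy from head to the
 * end of the ring first, then the remainder from the start.
 */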
if (rq->tail < head) {
len = rq->ring->size - head;
memcpy(ring, vaddr + head, len);
head = 0;
}
memcpy(ring + len, vaddr + head, size - len);
hexdump(m, ring, size);
kfree(ring);
}
}
static unsigned long read_ul(void *p, size_t x)
{
return *(unsigned long *)(p + x);
}
static void print_properties(struct intel_engine_cs *engine,
struct drm_printer *m)
{
static const struct pmap {
size_t offset;
const char *name;
} props[] = {
#define P(x) { \
.offset = offsetof(typeof(engine->props), x), \
.name = #x \
}
P(heartbeat_interval_ms),
P(max_busywait_duration_ns),
P(preempt_timeout_ms),
P(stop_timeout_ms),
P(timeslice_duration_ms),
{},
#undef P
};
const struct pmap *p;
drm_printf(m, "\tProperties:\n");
for (p = props; p->name; p++)
drm_printf(m, "\t\t%s: %lu [default %lu]\n",
p->name,
read_ul(&engine->props, p->offset),
read_ul(&engine->defaults, p->offset));
}
static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
{
struct intel_timeline *tl = get_timeline(rq);
i915_request_show(m, rq, msg, 0);
drm_printf(m, "\t\tring->start: 0x%08x\n",
i915_ggtt_offset(rq->ring->vma));
drm_printf(m, "\t\tring->head: 0x%08x\n",
rq->ring->head);
drm_printf(m, "\t\tring->tail: 0x%08x\n",
rq->ring->tail);
drm_printf(m, "\t\tring->emit: 0x%08x\n",
rq->ring->emit);
drm_printf(m, "\t\tring->space: 0x%08x\n",
rq->ring->space);
if (tl) {
drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
tl->hwsp_offset);
intel_timeline_put(tl);
}
print_request_ring(m, rq);
if (rq->context->lrc_reg_state) {
drm_printf(m, "Logical Ring Context:\n");
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
void intel_engine_dump_active_requests(struct list_head *requests,
struct i915_request *hung_rq,
struct drm_printer *m)
{
struct i915_request *rq;
const char *msg;
enum i915_request_state state;
list_for_each_entry(rq, requests, sched.link) {
if (rq == hung_rq)
continue;
state = i915_test_request_state(rq);
if (state < I915_REQUEST_QUEUED)
continue;
if (state == I915_REQUEST_ACTIVE)
msg = "\t\tactive on engine";
else
msg = "\t\tactive in queue";
engine_dump_request(rq, m, msg);
}
}
static void engine_dump_active_requests(struct intel_engine_cs *engine,
struct drm_printer *m)
{
struct intel_context *hung_ce = NULL;
struct i915_request *hung_rq = NULL;
/*
* No need for an engine->irq_seqno_barrier() before the seqno reads.
* The GPU is still running so requests are still executing and any
* hardware reads will be out of date by the time they are reported.
* But the intention here is just to report an instantaneous snapshot
* so that's fine.
*/
intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
drm_printf(m, "\tRequests:\n");
if (hung_rq)
engine_dump_request(hung_rq, m, "\t\thung");
else if (hung_ce)
drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
if (intel_uc_uses_guc_submission(&engine->gt->uc))
intel_guc_dump_active_requests(engine, hung_rq, m);
else
intel_execlists_dump_active_requests(engine, hung_rq, m);
if (hung_rq)
i915_request_put(hung_rq);
}
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
{
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
ktime_t dummy;
if (header) {
va_list ap;
va_start(ap, header);
drm_vprintf(m, header, &ap);
va_end(ap);
}
if (intel_gt_is_wedged(engine->gt))
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
drm_printf(m, "\tBarriers?: %s\n",
str_yes_no(!llist_empty(&engine->barrier_tasks)));
drm_printf(m, "\tLatency: %luus\n",
ewma__engine_latency_read(&engine->latency));
if (intel_engine_supports_stats(engine))
drm_printf(m, "\tRuntime: %llums\n",
ktime_to_ms(intel_engine_get_busy_time(engine,
&dummy)));
drm_printf(m, "\tForcewake: %x domains, %d active\n",
engine->fw_domain, READ_ONCE(engine->fw_active));
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
if (rq)
drm_printf(m, "\tHeartbeat: %d ms ago\n",
jiffies_to_msecs(jiffies - rq->emitted_jiffies));
rcu_read_unlock();
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
i915_reset_count(error));
print_properties(engine, m);
engine_dump_active_requests(engine, m);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
intel_engine_print_registers(engine, m);
intel_runtime_pm_put(engine->uncore->rpm, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
intel_execlists_show_requests(engine, m, i915_request_show, 8);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.addr, PAGE_SIZE);
drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine)));
intel_engine_print_breadcrumbs(engine, m);
}
/**
* intel_engine_get_busy_time() - Return current accumulated engine busyness
* @engine: engine to report on
* @now: monotonic timestamp of sampling
*
* Returns accumulated time @engine was busy since engine stats were enabled.
*/
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
{
return engine->busyness(engine, now);
}
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
unsigned int count, unsigned long flags)
{
if (count == 0)
return ERR_PTR(-EINVAL);
if (count == 1 && !(flags & FORCE_VIRTUAL))
return intel_context_create(siblings[0]);
GEM_BUG_ON(!siblings[0]->cops->create_virtual);
return siblings[0]->cops->create_virtual(siblings, count, flags);
}
static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
/*
* This search does not work in GuC submission mode. However, the GuC
* will report the hanging context directly to the driver itself. So
* the driver should never get here when in GuC mode.
*/
GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));
/*
* We are called by the error capture, reset and to dump engine
* state at random points in time. In particular, note that neither is
* crucially ordered with an interrupt. After a hang, the GPU is dead
* and we assume that no more writes can happen (we waited long enough
* for all writes that were in transaction to be flushed) - adding an
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
lockdep_assert_held(&engine->sched_engine->lock);
rcu_read_lock();
request = execlists_active(&engine->execlists);
if (request) {
struct intel_timeline *tl = request->context->timeline;
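/*
 * Walk back along the context's timeline to find the oldest request
 * of that context which has not yet completed; that is the request
 * the hardware is currently executing.
 */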
list_for_each_entry_from_reverse(request, &tl->requests, link) {
if (__i915_request_is_complete(request))
break;
active = request;
}
}
rcu_read_unlock();
if (active)
return active;
list_for_each_entry(request, &engine->sched_engine->requests,
sched.link) {
if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
continue;
active = request;
break;
}
return active;
}
void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
struct intel_context **ce, struct i915_request **rq)
{
unsigned long flags;
*ce = intel_engine_get_hung_context(engine);
if (*ce) {
intel_engine_clear_hung_context(engine);
*rq = intel_context_get_active_request(*ce);
return;
}
/*
* Getting here with GuC enabled means it is a forced error capture
* with no actual hang. So, no need to attempt the execlist search.
*/
if (intel_uc_uses_guc_submission(&engine->gt->uc))
return;
spin_lock_irqsave(&engine->sched_engine->lock, flags);
*rq = engine_execlist_find_hung_request(engine);
if (*rq)
*rq = i915_request_get_rcu(*rq);
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
/*
* If there are any non-fused-off CCS engines, we need to enable CCS
* support in the RCU_MODE register. This only needs to be done once,
* so for simplicity we'll take care of this in the RCS engine's
* resume handler; since the RCS and all CCS engines belong to the
* same reset domain and are reset together, this will also take care
* of re-applying the setting after i915-triggered resets.
*/
if (!CCS_MASK(engine->gt))
return;
intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
_MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_engine_cs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "i915_selftest.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gen8_engine_cs.h"
#include "i915_gem_ww.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
static void vma_set_qw(struct i915_vma *vma, u64 addr, u64 val)
{
GEM_BUG_ON(addr < i915_vma_offset(vma));
GEM_BUG_ON(addr >= i915_vma_offset(vma) + i915_vma_size(vma) + sizeof(val));
memset64(page_mask_bits(vma->obj->mm.mapping) +
(addr - i915_vma_offset(vma)), val, 1);
}
static int
pte_tlbinv(struct intel_context *ce,
struct i915_vma *va,
struct i915_vma *vb,
u64 align,
void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length),
u64 length,
struct rnd_state *prng)
{
const unsigned int pat_index =
i915_gem_get_pat_index(ce->vm->i915, I915_CACHE_NONE);
struct drm_i915_gem_object *batch;
struct drm_mm_node vb_node;
struct i915_request *rq;
struct i915_vma *vma;
u64 addr;
int err;
u32 *cs;
batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
if (IS_ERR(batch))
return PTR_ERR(batch);
vma = i915_vma_instance(batch, ce->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto out;
/* Pin va at random but aligned offset after vma */
addr = round_up(vma->node.start + vma->node.size, align);
/* MI_CONDITIONAL_BATCH_BUFFER_END limits address to 48b */
addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
va->size, align);
err = i915_vma_pin(va, 0, 0, addr | PIN_OFFSET_FIXED | PIN_USER);
if (err) {
pr_err("Cannot pin at %llx+%llx\n", addr, va->size);
goto out;
}
GEM_BUG_ON(i915_vma_offset(va) != addr);
if (vb != va) {
vb_node = vb->node;
vb->node = va->node; /* overwrites the _same_ PTE */
}
/*
* Now choose random dword at the 1st pinned page.
*
* SZ_64K pages on dg1 require that the whole PT be marked as
* containing 64KiB entries. So we make sure that the vma
* covers the whole PT, despite being randomly aligned to 64KiB,
* and restrict our sampling to the 2MiB PT within which
* we know that we will be using 64KiB pages.
*/
if (align == SZ_64K)
addr = round_up(addr, SZ_2M);
addr = igt_random_offset(prng, addr, addr + align, 8, 8);
if (va != vb)
pr_info("%s(%s): Sampling %llx, with alignment %llx, using PTE size %x (phys %x, sg %x), invalidate:%llx+%llx\n",
ce->engine->name, va->obj->mm.region->name ?: "smem",
addr, align, va->resource->page_sizes_gtt,
va->page_sizes.phys, va->page_sizes.sg,
addr & -length, length);
cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
*cs++ = MI_NOOP; /* for later termination */
/*
* Sample the target to see if we spot the updated backing store.
* Gen8 VCS compares the immediate value with the bitwise-AND of two
* consecutive DWORDs pointed to by addr; other gens/engines compare
* the value with the single DWORD pointed to by addr. Moreover, we
* want to exercise DWORD-sized invalidations. The values below have
* been chosen to fulfill all of these requirements.
*/
*cs++ = MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2;
*cs++ = 0; /* break if *addr == 0 */
*cs++ = lower_32_bits(addr);
*cs++ = upper_32_bits(addr);
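/*
 * Seed A with a non-zero qword and B with zero: the COND_BBE above
 * only terminates once reads through addr observe B's backing store.
 */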
vma_set_qw(va, addr, -1);
vma_set_qw(vb, addr, 0);
/* Keep sampling until we get bored */
*cs++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
*cs++ = lower_32_bits(i915_vma_offset(vma));
*cs++ = upper_32_bits(i915_vma_offset(vma));
i915_gem_object_flush_map(batch);
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_va;
}
err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
if (err) {
i915_request_add(rq);
goto out_va;
}
i915_request_get(rq);
i915_request_add(rq);
/* Short sleep to sanitycheck the batch is spinning before we begin */
msleep(10);
if (va == vb) {
if (!i915_request_completed(rq)) {
pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
ce->engine->name, va->obj->mm.region->name ?: "smem",
addr, align, va->resource->page_sizes_gtt,
va->page_sizes.phys, va->page_sizes.sg);
err = -EIO;
}
} else if (!i915_request_completed(rq)) {
struct i915_vma_resource vb_res = {
.bi.pages = vb->obj->mm.pages,
.bi.page_sizes = vb->obj->mm.page_sizes,
.start = i915_vma_offset(vb),
.vma_size = i915_vma_size(vb)
};
unsigned int pte_flags = 0;
/* Flip the PTE between A and B */
if (i915_gem_object_is_lmem(vb->obj))
pte_flags |= PTE_LM;
ce->vm->insert_entries(ce->vm, &vb_res, pat_index, pte_flags);
/* Flush the PTE update to concurrent HW */
tlbinv(ce->vm, addr & -length, length);
if (wait_for(i915_request_completed(rq), HZ / 2)) {
pr_err("%s: Request did not complete; the COND_BBE did not read the updated PTE\n",
ce->engine->name);
err = -EINVAL;
}
} else {
pr_err("Spinner ended unexpectedly\n");
err = -EIO;
}
i915_request_put(rq);
cs = page_mask_bits(batch->mm.mapping);
*cs = MI_BATCH_BUFFER_END;
wmb();
out_va:
if (vb != va)
vb->node = vb_node;
i915_vma_unpin(va);
if (i915_vma_unbind_unlocked(va))
err = -EIO;
out:
i915_gem_object_put(batch);
return err;
}
static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
{
struct intel_memory_region *mr = gt->i915->mm.regions[INTEL_REGION_LMEM_0];
resource_size_t size = SZ_1G;
/*
* Allocating the largest possible page size allows us to test all types
* of pages. To succeed with both allocations, especially in the case of
* Small BAR, try to allocate no more than a quarter of mappable memory.
*/
if (mr && size > mr->io_size / 4)
size = mr->io_size / 4;
return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}
static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)
{
/*
* SZ_64K pages require covering the whole 2M PT (gen8 to tgl/dg1).
* While that does not require the whole 2M block to be contiguous,
* it is easier to make it so, since we need that for SZ_2M pages.
* Since we randomly offset the start of the vma, we need a 4M object
* so that there is a 2M range within it that is suitable for SZ_64K PTEs.
*/
return i915_gem_object_create_internal(gt->i915, SZ_4M);
}
static int
mem_tlbinv(struct intel_gt *gt,
struct drm_i915_gem_object *(*create_fn)(struct intel_gt *),
void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length))
{
unsigned int ppgtt_size = RUNTIME_INFO(gt->i915)->ppgtt_size;
struct intel_engine_cs *engine;
struct drm_i915_gem_object *A, *B;
struct i915_ppgtt *ppgtt;
struct i915_vma *va, *vb;
enum intel_engine_id id;
I915_RND_STATE(prng);
void *vaddr;
int err;
/*
* Check that the TLB invalidate is able to revoke an active
* page. We load a page into a spinning COND_BBE loop and then
* remap that page to a new physical address. The old address is
* retained in the TLB cache (and so the loop keeps spinning) until
* we issue an invalidate.
*/
A = create_fn(gt);
if (IS_ERR(A))
return PTR_ERR(A);
vaddr = i915_gem_object_pin_map_unlocked(A, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_a;
}
B = create_fn(gt);
if (IS_ERR(B)) {
err = PTR_ERR(B);
goto out_a;
}
vaddr = i915_gem_object_pin_map_unlocked(B, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out_b;
}
GEM_BUG_ON(A->base.size != B->base.size);
if ((A->mm.page_sizes.phys | B->mm.page_sizes.phys) & (A->base.size - 1))
pr_warn("Failed to allocate contiguous pages for size %zx\n",
A->base.size);
ppgtt = i915_ppgtt_create(gt, 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_b;
}
va = i915_vma_instance(A, &ppgtt->vm, NULL);
if (IS_ERR(va)) {
err = PTR_ERR(va);
goto out_vm;
}
vb = i915_vma_instance(B, &ppgtt->vm, NULL);
if (IS_ERR(vb)) {
err = PTR_ERR(vb);
goto out_vm;
}
err = 0;
for_each_engine(engine, gt, id) {
struct i915_gem_ww_ctx ww;
struct intel_context *ce;
int bit;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
}
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(&ppgtt->vm);
for_i915_gem_ww(&ww, err, true)
err = intel_context_pin_ww(ce, &ww);
if (err)
goto err_put;
for_each_set_bit(bit,
(unsigned long *)&RUNTIME_INFO(gt->i915)->page_sizes,
BITS_PER_TYPE(RUNTIME_INFO(gt->i915)->page_sizes)) {
unsigned int len;
if (BIT_ULL(bit) < i915_vm_obj_min_alignment(va->vm, va->obj))
continue;
/* sanitycheck the semaphore wake up */
err = pte_tlbinv(ce, va, va,
BIT_ULL(bit),
NULL, SZ_4K,
&prng);
if (err)
goto err_unpin;
for (len = 2; len <= ppgtt_size; len = min(2 * len, ppgtt_size)) {
err = pte_tlbinv(ce, va, vb,
BIT_ULL(bit),
tlbinv,
BIT_ULL(len),
&prng);
if (err)
goto err_unpin;
if (len == ppgtt_size)
break;
}
}
err_unpin:
intel_context_unpin(ce);
err_put:
intel_context_put(ce);
if (err)
break;
}
if (igt_flush_test(gt->i915))
err = -EIO;
out_vm:
i915_vm_put(&ppgtt->vm);
out_b:
i915_gem_object_put(B);
out_a:
i915_gem_object_put(A);
return err;
}
static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
{
intel_gt_invalidate_tlb_full(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
}
static int invalidate_full(void *arg)
{
struct intel_gt *gt = arg;
int err;
if (GRAPHICS_VER(gt->i915) < 8)
return 0; /* TLB invalidate not implemented */
err = mem_tlbinv(gt, create_smem, tlbinv_full);
if (err == 0)
err = mem_tlbinv(gt, create_lmem, tlbinv_full);
if (err == -ENODEV || err == -ENXIO)
err = 0;
return err;
}
int intel_tlb_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(invalidate_full),
};
struct intel_gt *gt;
unsigned int i;
for_each_gt(gt, i915, i) {
int err;
if (intel_gt_is_wedged(gt))
continue;
err = intel_gt_live_subtests(tests, gt);
if (err)
return err;
}
return 0;
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_tlb.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*/
#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
#include "intel_ring.h"
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
flags |= PIPE_CONTROL_CS_STALL;
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
/*
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
if (GRAPHICS_VER(rq->i915) == 9)
vf_flush_wa = true;
/* WaForGAMHang:kbl */
if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
dc_flush_wa = true;
}
len = 6;
if (vf_flush_wa)
len += 6;
if (dc_flush_wa)
len += 12;
cs = intel_ring_begin(rq, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (vf_flush_wa)
cs = gen8_emit_pipe_control(cs, 0, 0);
if (dc_flush_wa)
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
if (dc_flush_wa)
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
intel_ring_advance(rq, cs);
return 0;
}
int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
u32 cmd, *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
cmd = MI_FLUSH_DW + 1;
/*
* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_INVALIDATE_TLB;
if (rq->engine->class == VIDEO_DECODE_CLASS)
cmd |= MI_INVALIDATE_BSD;
}
*cs++ = cmd;
*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
intel_ring_advance(rq, cs);
return 0;
}
int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
if (mode & EMIT_FLUSH) {
u32 *cs;
u32 flags = 0;
flags |= PIPE_CONTROL_CS_STALL;
flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(rq, cs);
}
if (mode & EMIT_INVALIDATE) {
u32 *cs;
u32 flags = 0;
flags |= PIPE_CONTROL_CS_STALL;
flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(rq, cs);
}
return 0;
}
static u32 preparser_disable(bool state)
{
return MI_ARB_CHECK | 1 << 8 | state;
}
static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
switch (engine->id) {
case RCS0:
return GEN12_CCS_AUX_INV;
case BCS0:
return GEN12_BCS0_AUX_INV;
case VCS0:
return GEN12_VD0_AUX_INV;
case VCS2:
return GEN12_VD2_AUX_INV;
case VECS0:
return GEN12_VE0_AUX_INV;
case CCS0:
return GEN12_CCS0_AUX_INV;
default:
return INVALID_MMIO_REG;
}
}
static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
i915_reg_t reg = gen12_get_aux_inv_reg(engine);
if (IS_PONTEVECCHIO(engine->i915))
return false;
/*
* So far, the platforms supported by i915 that have flat CCS do not
* require AUX invalidation. Also check whether the engine requires it.
*/
return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
}
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
{
i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
u32 gsi_offset = engine->gt->uncore->gsi_offset;
if (!gen12_needs_ccs_aux_inv(engine))
return cs;
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
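/*
* Wait for the invalidation to complete: poll the same register until
* the HW clears the AUX_INV bit again.
*/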
*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
MI_SEMAPHORE_REGISTER_POLL |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = 0;
*cs++ = 0;
return cs;
}
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
/* Wa_14016712196 */
if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
u32 *cs;
/* dummy PIPE_CONTROL + depth flush */
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = gen12_emit_pipe_control(cs,
0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH,
LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(rq, cs);
}
return 0;
}
int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
struct intel_engine_cs *engine = rq->engine;
/*
* On Aux CCS platforms, invalidating the Aux table
* requires quiescing memory traffic beforehand.
*/
if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
u32 bit_group_0 = 0;
u32 bit_group_1 = 0;
int err;
u32 *cs;
err = mtl_dummy_pipe_control(rq);
if (err)
return err;
bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
/*
* When required, on MTL and later platforms we
* need to set the CCS_FLUSH bit in the pipe control.
*/
if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/* Wa_1409600907:tgl,adl-p */
bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;
bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
bit_group_1 |= PIPE_CONTROL_QW_WRITE;
bit_group_1 |= PIPE_CONTROL_CS_STALL;
if (!HAS_3D_PIPELINE(engine->i915))
bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
else if (engine->class == COMPUTE_CLASS)
bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
LRC_PPHWSP_SCRATCH_ADDR);
intel_ring_advance(rq, cs);
}
if (mode & EMIT_INVALIDATE) {
u32 flags = 0;
u32 *cs, count;
int err;
err = mtl_dummy_pipe_control(rq);
if (err)
return err;
flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STORE_DATA_INDEX;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_CS_STALL;
if (!HAS_3D_PIPELINE(engine->i915))
flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
else if (engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
count = 8;
if (gen12_needs_ccs_aux_inv(rq->engine))
count += 8;
cs = intel_ring_begin(rq, count);
if (IS_ERR(cs))
return PTR_ERR(cs);
/*
* Prevent the pre-parser from skipping past the TLB
* invalidate and loading a stale page for the batch
* buffer / request payload.
*/
*cs++ = preparser_disable(true);
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
cs = gen12_emit_aux_table_inv(engine, cs);
*cs++ = preparser_disable(false);
intel_ring_advance(rq, cs);
}
return 0;
}
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
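/*
* Note that "cmd" is used first as the ring dword count and then
* reused below as the MI_FLUSH_DW command word.
*/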
u32 cmd = 4;
u32 *cs;
if (mode & EMIT_INVALIDATE) {
cmd += 2;
if (gen12_needs_ccs_aux_inv(rq->engine))
cmd += 8;
}
cs = intel_ring_begin(rq, cmd);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (mode & EMIT_INVALIDATE)
*cs++ = preparser_disable(true);
cmd = MI_FLUSH_DW + 1;
/*
* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_INVALIDATE_TLB;
if (rq->engine->class == VIDEO_DECODE_CLASS)
cmd |= MI_INVALIDATE_BSD;
if (gen12_needs_ccs_aux_inv(rq->engine) &&
rq->engine->class == COPY_ENGINE_CLASS)
cmd |= MI_FLUSH_DW_CCS;
}
*cs++ = cmd;
*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
*cs++ = 0; /* upper addr */
*cs++ = 0; /* value */
cs = gen12_emit_aux_table_inv(rq->engine, cs);
if (mode & EMIT_INVALIDATE)
*cs++ = preparser_disable(false);
intel_ring_advance(rq, cs);
return 0;
}
static u32 preempt_address(struct intel_engine_cs *engine)
{
return (i915_ggtt_offset(engine->status_page.vma) +
I915_GEM_HWS_PREEMPT_ADDR);
}
static u32 hwsp_offset(const struct i915_request *rq)
{
const struct intel_timeline *tl;
/* Before the request is executed, the timeline is fixed */
tl = rcu_dereference_protected(rq->timeline,
!i915_request_signaled(rq));
/* See the comment in i915_request_active_seqno(). */
return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}
int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
if (!i915_request_timeline(rq)->has_initial_breadcrumb)
return 0;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = hwsp_offset(rq);
*cs++ = 0;
*cs++ = rq->fence.seqno - 1;
/*
* Check if we have been preempted before we even get started.
*
* After this point i915_request_started() reports true, even if
* we get preempted and so are no longer running.
*
* i915_request_started() is used during preemption processing
* to decide if the request is currently inside the user payload
* or spinning on a kernel semaphore (or earlier). For no-preemption
* requests, we do allow preemption on the semaphore before the user
* payload, but do not allow preemption once the request is started.
*
* i915_request_started() is similarly used during GPU hangs to
* determine if the user's payload was guilty, and if so, the
* request is banned. Before the request is started, it is assumed
* to be unharmed and an innocent victim of another's hang.
*/
*cs++ = MI_NOOP;
*cs++ = MI_ARB_CHECK;
intel_ring_advance(rq, cs);
/* Record the updated position of the request's payload */
rq->infix = intel_ring_offset(rq, cs);
__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
return 0;
}
static int __xehp_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags,
u32 arb)
{
struct intel_context *ce = rq->context;
u32 wa_offset = lrc_indirect_bb(ce);
u32 *cs;
GEM_BUG_ON(!ce->wa_bb_page);
cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | arb;
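/*
* Reload RING_PREDICATE_RESULT from the per-context wa bb scratch
* at DG2_PREDICATE_RESULT_WA, as written by setup_predicate_disable_wa().
*/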
*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
*cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
*cs++ = 0;
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
/* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */
*cs++ = MI_BATCH_BUFFER_START_GEN8;
*cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
*cs++ = 0;
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
intel_ring_advance(rq, cs);
return 0;
}
int xehp_emit_bb_start_noarb(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
}
int xehp_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
}
int gen8_emit_bb_start_noarb(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
/*
* WaDisableCtxRestoreArbitration:bdw,chv
*
* We don't need to perform MI_ARB_ENABLE as often as we do (in
* particular all the gen that do not need the w/a at all!), if we
* took care to make sure that on every switch into this context
* (both ordinary and for preemption) arbitration was enabled,
* we would be fine. However, for gen8 there is another w/a that
* requires us to not preempt inside GPGPU execution, so we keep
* arbitration disabled for gen8 batches. Arbitration will be
* re-enabled before we close the request
* (engine->emit_fini_breadcrumb).
*/
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
/* FIXME(BDW+): Address space and security selectors. */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
intel_ring_advance(rq, cs);
return 0;
}
int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
u32 *cs;
if (unlikely(i915_request_has_nopreempt(rq)))
return gen8_emit_bb_start_noarb(rq, offset, len, flags);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static void assert_request_valid(struct i915_request *rq)
{
struct intel_ring *ring __maybe_unused = rq->ring;
/* Can we unwind this request without appearing to go forwards? */
GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
}
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
{
/* Ensure there's always at least one preemption point per-request. */
*cs++ = MI_ARB_CHECK;
*cs++ = MI_NOOP;
rq->wa_tail = intel_ring_offset(rq, cs);
/* Check that entire request is less than half the ring */
assert_request_valid(rq);
return cs;
}
static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = preempt_address(rq->engine);
*cs++ = 0;
*cs++ = MI_NOOP;
return cs;
}
static __always_inline u32*
gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
if (intel_engine_has_semaphores(rq->engine) &&
!intel_uc_uses_guc_submission(&rq->engine->gt->uc))
cs = emit_preempt_busywait(rq, cs);
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return gen8_emit_wa_tail(rq, cs);
}
static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}
u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}
u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
cs = gen8_emit_pipe_control(cs,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TLB_INVALIDATE |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
cs = gen8_emit_ggtt_write_rcs(cs,
rq->fence.seqno,
hwsp_offset(rq),
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL);
return gen8_emit_fini_breadcrumb_tail(rq, cs);
}
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
cs = gen8_emit_pipe_control(cs,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TLB_INVALIDATE |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
/*XXX: Look at gen8_emit_fini_breadcrumb_rcs */
cs = gen8_emit_ggtt_write_rcs(cs,
rq->fence.seqno,
hwsp_offset(rq),
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL);
return gen8_emit_fini_breadcrumb_tail(rq, cs);
}
/*
* Note that the CS instruction pre-parser will not stall on the breadcrumb
* flush and will continue pre-fetching the instructions after it before the
* memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
* BB_START/END instructions, so, even though we might pre-fetch the pre-amble
* of the next request before the memory has been flushed, we're guaranteed that
* we won't access the batch itself too early.
* However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
* so, if the current request is modifying an instruction in the next request on
* the same intel_context, we might pre-fetch and then execute the pre-update
* instruction. To avoid this, the users of self-modifying code should either
* disable the parser around the code emitting the memory writes, via a new flag
* added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
* the in-kernel use-cases we've opted to use a separate context, see
* reloc_gpu() as an example.
* All the above applies only to the instructions themselves. Non-inline data
* used by the instructions is not pre-fetched.
*/
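/*
* A minimal sketch of the above for gen12+ (assuming a request @rq with
* enough ring space, and writes that patch instructions in a later request
* on the same context): bracket the writes with the MI_ARB_CHECK based
* pre-parser toggle, as built by preparser_disable() earlier in this file:
*
*	cs = intel_ring_begin(rq, n);
*	*cs++ = preparser_disable(true);
*	... emit the memory writes here ...
*	*cs++ = preparser_disable(false);
*	intel_ring_advance(rq, cs);
*
* The in-kernel users instead emit such writes from a separate context
* (see reloc_gpu()), as noted above.
*/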
static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = preempt_address(rq->engine);
*cs++ = 0;
*cs++ = 0;
return cs;
}
/* Wa_14014475959:dg2 */
#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
static u32 ccs_semaphore_offset(struct i915_request *rq)
{
return i915_ggtt_offset(rq->context->state) +
(LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
}
/* Wa_14014475959:dg2 */
static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
int i;
*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
MI_ATOMIC_MOVE;
*cs++ = ccs_semaphore_offset(rq);
*cs++ = 0;
*cs++ = 1;
/*
* When MI_ATOMIC_INLINE_DATA is set, this command must be 11 DW + (1 NOP)
* to align: 4 DWs above + 8 filler DWs here.
*/
for (i = 0; i < 8; ++i)
*cs++ = 0;
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
*cs++ = ccs_semaphore_offset(rq);
*cs++ = 0;
return cs;
}
static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
if (intel_engine_has_semaphores(rq->engine) &&
!intel_uc_uses_guc_submission(&rq->engine->gt->uc))
cs = gen12_emit_preempt_busywait(rq, cs);
/* Wa_14014475959:dg2 */
if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
cs = ccs_emit_wa_busywait(rq, cs);
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return gen8_emit_wa_tail(rq, cs);
}
u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
/* XXX Stalling flush before seqno write; post-sync not */
cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
return gen12_emit_fini_breadcrumb_tail(rq, cs);
}
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
struct drm_i915_private *i915 = rq->i915;
u32 flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TLB_INVALIDATE |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_FLUSH_L3 |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
/* Wa_14016712196 */
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
/* dummy PIPE_CONTROL + depth flush */
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
/* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL;
if (!HAS_3D_PIPELINE(rq->i915))
flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
else if (rq->engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
cs = gen12_emit_pipe_control(cs, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0);
/*XXX: Look at gen8_emit_fini_breadcrumb_rcs */
cs = gen12_emit_ggtt_write_rcs(cs,
rq->fence.seqno,
hwsp_offset(rq),
0,
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL);
return gen12_emit_fini_breadcrumb_tail(rq, cs);
}
| linux-master | drivers/gpu/drm/i915/gt/gen8_engine_cs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "intel_ggtt_gmch.h"
#include <drm/intel-gtt.h>
#include <linux/agp_backend.h>
#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_gtt.h"
#include "intel_gt_regs.h"
#include "intel_gt.h"
static void gmch_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
unsigned int pat_index,
u32 unused)
{
unsigned int flags = (pat_index == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 unused)
{
unsigned int flags = (pat_index == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
flags);
}
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
intel_gmch_gtt_flush();
}
static void gmch_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static void gmch_ggtt_remove(struct i915_address_space *vm)
{
intel_gmch_remove();
}
/*
* Certain Gen5 chipsets require idling the GPU before unmapping anything from
* the GTT when VT-d is enabled.
*/
static bool needs_idle_maps(struct drm_i915_private *i915)
{
/*
* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if (!i915_vtd_active(i915))
return false;
if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
return true;
return false;
}
int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
phys_addr_t gmadr_base;
int ret;
ret = intel_gmch_probe(i915->gmch.pdev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
}
intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
if (needs_idle_maps(i915)) {
drm_notice(&i915->drm,
"Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
ggtt->do_idle_maps = true;
}
ggtt->vm.insert_page = gmch_ggtt_insert_page;
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
ggtt->vm.clear_range = gmch_ggtt_clear_range;
ggtt->vm.scratch_range = gmch_ggtt_clear_range;
ggtt->vm.cleanup = gmch_ggtt_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
if (unlikely(ggtt->do_idle_maps))
drm_notice(&i915->drm,
"Applying Ironlake quirks for intel_iommu\n");
return 0;
}
int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
{
if (!intel_gmch_enable_gtt())
return -EIO;
return 0;
}
void intel_ggtt_gmch_flush(void)
{
intel_gmch_gtt_flush();
}
| linux-master | drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*/
#include "gem/i915_gem_lmem.h"
#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "shmem_utils.h"
/*
* The per-platform tables are u8-encoded in @data. Decode @data and set the
* addresses' offset and commands in @regs. The following encoding is used
* for each byte. There are 2 steps: decoding commands and decoding addresses.
*
* Commands:
* [7]: create NOPs - the number of NOPs is set in the lower bits
* [6]: when creating a MI_LOAD_REGISTER_IMM command, allow setting
* MI_LRI_FORCE_POSTED
* [5:0]: Number of NOPs or registers to set values to in case of
* MI_LOAD_REGISTER_IMM
*
* Addresses: these are decoded after a MI_LOAD_REGISTER_IMM command by "count"
* number of registers. They are set by using the REG/REG16 macros: the former
* is used for offsets smaller than 0x200 while the latter is for values bigger
* than that. Those macros already set all the bits documented below correctly:
*
* [7]: When a register offset needs more than 6 bits, additional bytes
* follow, carrying the lower bits
* [6:0]: Register offset, without considering the engine base.
*
* This function only tweaks the commands and register offsets. Values are
* not filled out here (see the worked decode example after set_offsets()).
*/
static void set_offsets(u32 *regs,
const u8 *data,
const struct intel_engine_cs *engine,
bool close)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
#define REG16(x) \
(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
(((x) >> 2) & 0x7f)
#define END 0
{
const u32 base = engine->mmio_base;
while (*data) {
u8 count, flags;
if (*data & BIT(7)) { /* skip */
count = *data++ & ~BIT(7);
regs += count;
continue;
}
count = *data & 0x3f;
flags = *data >> 6;
data++;
*regs = MI_LOAD_REGISTER_IMM(count);
if (flags & POSTED)
*regs |= MI_LRI_FORCE_POSTED;
if (GRAPHICS_VER(engine->i915) >= 11)
*regs |= MI_LRI_LRM_CS_MMIO;
regs++;
GEM_BUG_ON(!count);
do {
u32 offset = 0;
u8 v;
do {
v = *data++;
offset <<= 7;
offset |= v & ~BIT(7);
} while (v & BIT(7));
regs[0] = base + (offset << 2);
regs += 2;
} while (--count);
}
if (close) {
/* Close the batch; used mainly by live_lrc_layout() */
*regs = MI_BATCH_BUFFER_END;
if (GRAPHICS_VER(engine->i915) >= 11)
*regs |= BIT(0);
}
}
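/*
* Worked example of the encoding above (illustration only, assuming the
* NOP/LRI/REG/REG16 macros as defined here): the table fragment
*
*	NOP(1), LRI(2, POSTED), REG16(0x244), REG(0x034), END
*
* encodes as the bytes 0x81, 0x42, 0x81, 0x11, 0x0d, 0x00 and is expanded
* by set_offsets() into
*
*	regs[0]   skipped (one NOP slot)
*	regs[1] = MI_LOAD_REGISTER_IMM(2) | MI_LRI_FORCE_POSTED
*	          (| MI_LRI_LRM_CS_MMIO on gen11+)
*	regs[2] = engine->mmio_base + 0x244
*	regs[4] = engine->mmio_base + 0x034
*
* leaving regs[3] and regs[5] as the value slots, which this function does
* not fill out.
*/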
static const u8 gen8_xcs_offsets[] = {
NOP(1),
LRI(11, 0),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x11c),
REG(0x114),
REG(0x118),
NOP(9),
LRI(9, 0),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
NOP(13),
LRI(2, 0),
REG16(0x200),
REG(0x028),
END
};
static const u8 gen9_xcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x11c),
REG(0x114),
REG(0x118),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
NOP(3),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
NOP(13),
LRI(1, POSTED),
REG16(0x200),
NOP(13),
LRI(44, POSTED),
REG(0x028),
REG(0x09c),
REG(0x0c0),
REG(0x178),
REG(0x17c),
REG16(0x358),
REG(0x170),
REG(0x150),
REG(0x154),
REG(0x158),
REG16(0x41c),
REG16(0x600),
REG16(0x604),
REG16(0x608),
REG16(0x60c),
REG16(0x610),
REG16(0x614),
REG16(0x618),
REG16(0x61c),
REG16(0x620),
REG16(0x624),
REG16(0x628),
REG16(0x62c),
REG16(0x630),
REG16(0x634),
REG16(0x638),
REG16(0x63c),
REG16(0x640),
REG16(0x644),
REG16(0x648),
REG16(0x64c),
REG16(0x650),
REG16(0x654),
REG16(0x658),
REG16(0x65c),
REG16(0x660),
REG16(0x664),
REG16(0x668),
REG16(0x66c),
REG16(0x670),
REG16(0x674),
REG16(0x678),
REG16(0x67c),
REG(0x068),
END
};
static const u8 gen12_xcs_offsets[] = {
NOP(1),
LRI(13, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
REG(0x180),
REG16(0x2b4),
NOP(5),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
END
};
static const u8 dg2_xcs_offsets[] = {
NOP(1),
LRI(15, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
REG(0x180),
REG16(0x2b4),
REG(0x120),
REG(0x124),
NOP(1),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
END
};
static const u8 gen8_rcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x11c),
REG(0x114),
REG(0x118),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
NOP(3),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
NOP(13),
LRI(1, 0),
REG(0x0c8),
END
};
static const u8 gen9_rcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
REG16(0x244),
REG(0x34),
REG(0x30),
REG(0x38),
REG(0x3c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x11c),
REG(0x114),
REG(0x118),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
NOP(3),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
NOP(13),
LRI(1, 0),
REG(0xc8),
NOP(13),
LRI(44, POSTED),
REG(0x28),
REG(0x9c),
REG(0xc0),
REG(0x178),
REG(0x17c),
REG16(0x358),
REG(0x170),
REG(0x150),
REG(0x154),
REG(0x158),
REG16(0x41c),
REG16(0x600),
REG16(0x604),
REG16(0x608),
REG16(0x60c),
REG16(0x610),
REG16(0x614),
REG16(0x618),
REG16(0x61c),
REG16(0x620),
REG16(0x624),
REG16(0x628),
REG16(0x62c),
REG16(0x630),
REG16(0x634),
REG16(0x638),
REG16(0x63c),
REG16(0x640),
REG16(0x644),
REG16(0x648),
REG16(0x64c),
REG16(0x650),
REG16(0x654),
REG16(0x658),
REG16(0x65c),
REG16(0x660),
REG16(0x664),
REG16(0x668),
REG16(0x66c),
REG16(0x670),
REG16(0x674),
REG16(0x678),
REG16(0x67c),
REG(0x68),
END
};
static const u8 gen11_rcs_offsets[] = {
NOP(1),
LRI(15, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x11c),
REG(0x114),
REG(0x118),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
REG(0x180),
NOP(1),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
LRI(1, POSTED),
REG(0x1b0),
NOP(10),
LRI(1, 0),
REG(0x0c8),
END
};
static const u8 gen12_rcs_offsets[] = {
NOP(1),
LRI(13, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
REG(0x180),
REG16(0x2b4),
NOP(5),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
LRI(3, POSTED),
REG(0x1b0),
REG16(0x5a8),
REG16(0x5ac),
NOP(6),
LRI(1, 0),
REG(0x0c8),
NOP(3 + 9 + 1),
LRI(51, POSTED),
REG16(0x588),
REG16(0x588),
REG16(0x588),
REG16(0x588),
REG16(0x588),
REG16(0x588),
REG(0x028),
REG(0x09c),
REG(0x0c0),
REG(0x178),
REG(0x17c),
REG16(0x358),
REG(0x170),
REG(0x150),
REG(0x154),
REG(0x158),
REG16(0x41c),
REG16(0x600),
REG16(0x604),
REG16(0x608),
REG16(0x60c),
REG16(0x610),
REG16(0x614),
REG16(0x618),
REG16(0x61c),
REG16(0x620),
REG16(0x624),
REG16(0x628),
REG16(0x62c),
REG16(0x630),
REG16(0x634),
REG16(0x638),
REG16(0x63c),
REG16(0x640),
REG16(0x644),
REG16(0x648),
REG16(0x64c),
REG16(0x650),
REG16(0x654),
REG16(0x658),
REG16(0x65c),
REG16(0x660),
REG16(0x664),
REG16(0x668),
REG16(0x66c),
REG16(0x670),
REG16(0x674),
REG16(0x678),
REG16(0x67c),
REG(0x068),
REG(0x084),
NOP(1),
END
};
static const u8 xehp_rcs_offsets[] = {
NOP(1),
LRI(13, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
REG(0x180),
REG16(0x2b4),
NOP(5),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
LRI(3, POSTED),
REG(0x1b0),
REG16(0x5a8),
REG16(0x5ac),
NOP(6),
LRI(1, 0),
REG(0x0c8),
END
};
static const u8 dg2_rcs_offsets[] = {
NOP(1),
LRI(15, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
REG(0x180),
REG16(0x2b4),
REG(0x120),
REG(0x124),
NOP(1),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
LRI(3, POSTED),
REG(0x1b0),
REG16(0x5a8),
REG16(0x5ac),
NOP(6),
LRI(1, 0),
REG(0x0c8),
END
};
static const u8 mtl_rcs_offsets[] = {
NOP(1),
LRI(15, POSTED),
REG16(0x244),
REG(0x034),
REG(0x030),
REG(0x038),
REG(0x03c),
REG(0x168),
REG(0x140),
REG(0x110),
REG(0x1c0),
REG(0x1c4),
REG(0x1c8),
REG(0x180),
REG16(0x2b4),
REG(0x120),
REG(0x124),
NOP(1),
LRI(9, POSTED),
REG16(0x3a8),
REG16(0x28c),
REG16(0x288),
REG16(0x284),
REG16(0x280),
REG16(0x27c),
REG16(0x278),
REG16(0x274),
REG16(0x270),
NOP(2),
LRI(2, POSTED),
REG16(0x5a8),
REG16(0x5ac),
NOP(6),
LRI(1, 0),
REG(0x0c8),
END
};
#undef END
#undef REG16
#undef REG
#undef LRI
#undef NOP
static const u8 *reg_offsets(const struct intel_engine_cs *engine)
{
/*
* The gen12+ lists only have the registers we program in the basic
* default state. We rely on the context image using relative
* addressing to automatically fix up the register state between the
* physical engines backing a virtual engine.
*/
GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&
!intel_engine_has_relative_mmio(engine));
if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) {
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70))
return mtl_rcs_offsets;
else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_rcs_offsets;
else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
return xehp_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 11)
return gen11_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 9)
return gen9_rcs_offsets;
else
return gen8_rcs_offsets;
} else {
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return dg2_xcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_xcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 9)
return gen9_xcs_offsets;
else
return gen8_xcs_offsets;
}
}
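/*
* Most of the lrc_ring_*() helpers below return the dword index within the
* context image of the LRI slot for the named register (so index + 1 is the
* value slot), or -1 when the register is not present for this
* engine/platform.
*/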
static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
return 0x70;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x60;
else if (GRAPHICS_VER(engine->i915) >= 9)
return 0x54;
else if (engine->class == RENDER_CLASS)
return 0x58;
else
return -1;
}
static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
return 0x80;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x70;
else if (GRAPHICS_VER(engine->i915) >= 9)
return 0x64;
else if (GRAPHICS_VER(engine->i915) >= 8 &&
engine->class == RENDER_CLASS)
return 0xc4;
else
return -1;
}
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
return 0x84;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x74;
else if (GRAPHICS_VER(engine->i915) >= 9)
return 0x68;
else if (engine->class == RENDER_CLASS)
return 0xd8;
else
return -1;
}
static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER(engine->i915) >= 12)
return 0x12;
else if (GRAPHICS_VER(engine->i915) >= 9 || engine->class == RENDER_CLASS)
return 0x18;
else
return -1;
}
static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
{
int x;
x = lrc_ring_wa_bb_per_ctx(engine);
if (x < 0)
return x;
return x + 2;
}
static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
{
int x;
x = lrc_ring_indirect_ptr(engine);
if (x < 0)
return x;
return x + 2;
}
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
/*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
* simply to match the RCS context image layout.
*/
return 0xc6;
else if (engine->class != RENDER_CLASS)
return -1;
else if (GRAPHICS_VER(engine->i915) >= 12)
return 0xb6;
else if (GRAPHICS_VER(engine->i915) >= 11)
return 0xaa;
else
return -1;
}
static u32
lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER(engine->i915) >= 12)
return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
else if (GRAPHICS_VER(engine->i915) >= 11)
return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
else if (GRAPHICS_VER(engine->i915) >= 9)
return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
else if (GRAPHICS_VER(engine->i915) >= 8)
return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
GEM_BUG_ON(GRAPHICS_VER(engine->i915) < 8);
return 0;
}
static void
lrc_setup_indirect_ctx(u32 *regs,
const struct intel_engine_cs *engine,
u32 ctx_bb_ggtt_addr,
u32 size)
{
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
regs[lrc_ring_indirect_ptr(engine) + 1] =
ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
regs[lrc_ring_indirect_offset(engine) + 1] =
lrc_ring_indirect_offset_default(engine) << 6;
}
static void init_common_regs(u32 * const regs,
const struct intel_context *ce,
const struct intel_engine_cs *engine,
bool inhibit)
{
u32 ctl;
int loc;
ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (inhibit)
ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
if (GRAPHICS_VER(engine->i915) < 11)
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
loc = lrc_ring_bb_offset(engine);
if (loc != -1)
regs[loc + 1] = 0;
}
static void init_wa_bb_regs(u32 * const regs,
const struct intel_engine_cs *engine)
{
const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
if (wa_ctx->per_ctx.size) {
const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
(ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
if (wa_ctx->indirect_ctx.size) {
lrc_setup_indirect_ctx(regs, engine,
i915_ggtt_offset(wa_ctx->vma) +
wa_ctx->indirect_ctx.offset,
wa_ctx->indirect_ctx.size);
}
}
static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
{
if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ppgtt, regs);
} else {
ASSIGN_CTX_PDP(ppgtt, regs, 3);
ASSIGN_CTX_PDP(ppgtt, regs, 2);
ASSIGN_CTX_PDP(ppgtt, regs, 1);
ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
}
static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
{
if (i915_is_ggtt(vm))
return i915_vm_to_ggtt(vm)->alias;
else
return i915_vm_to_ppgtt(vm);
}
static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
{
int x;
x = lrc_ring_mi_mode(engine);
if (x != -1) {
regs[x + 1] &= ~STOP_RING;
regs[x + 1] |= STOP_RING << 16;
}
}
static void __lrc_init_regs(u32 *regs,
const struct intel_context *ce,
const struct intel_engine_cs *engine,
bool inhibit)
{
/*
* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
* values we are setting here are only for the first context restore:
* on a subsequent save, the GPU will recreate this batchbuffer with new
* values (including all the missing MI_LOAD_REGISTER_IMM commands that
* we are not initializing here).
*
* Must keep consistent with virtual_update_register_offsets().
*/
if (inhibit)
memset(regs, 0, PAGE_SIZE);
set_offsets(regs, reg_offsets(engine), engine, inhibit);
init_common_regs(regs, ce, engine, inhibit);
init_ppgtt_regs(regs, vm_alias(ce->vm));
init_wa_bb_regs(regs, engine);
__reset_stop_ring(regs, engine);
}
void lrc_init_regs(const struct intel_context *ce,
const struct intel_engine_cs *engine,
bool inhibit)
{
__lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
}
void lrc_reset_regs(const struct intel_context *ce,
const struct intel_engine_cs *engine)
{
__reset_stop_ring(ce->lrc_reg_state, engine);
}
static void
set_redzone(void *vaddr, const struct intel_engine_cs *engine)
{
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
vaddr += engine->context_size;
memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
}
static void
check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
{
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
vaddr += engine->context_size;
if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
drm_err_once(&engine->i915->drm,
"%s context redzone overwritten!\n",
engine->name);
}
static u32 context_wa_bb_offset(const struct intel_context *ce)
{
return PAGE_SIZE * ce->wa_bb_page;
}
static u32 *context_indirect_bb(const struct intel_context *ce)
{
void *ptr;
GEM_BUG_ON(!ce->wa_bb_page);
ptr = ce->lrc_reg_state;
ptr -= LRC_STATE_OFFSET; /* back to start of context image */
ptr += context_wa_bb_offset(ce);
return ptr;
}
void lrc_init_state(struct intel_context *ce,
struct intel_engine_cs *engine,
void *state)
{
bool inhibit = true;
set_redzone(state, engine);
if (engine->default_state) {
shmem_read(engine->default_state, 0,
state, engine->context_size);
__set_bit(CONTEXT_VALID_BIT, &ce->flags);
inhibit = false;
}
/* Clear the ppHWSP (inc. per-context counters) */
memset(state, 0, PAGE_SIZE);
/* Clear the indirect wa and storage */
if (ce->wa_bb_page)
memset(state + context_wa_bb_offset(ce), 0, PAGE_SIZE);
/*
* The second page of the context object contains some registers which
* must be set up prior to the first execution.
*/
__lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
}
u32 lrc_indirect_bb(const struct intel_context *ce)
{
return i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce);
}
static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs)
{
/* If predication is active, this will be noop'ed */
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
*cs++ = 0;
*cs++ = 0; /* No predication */
/* predicated end, only terminates if SET_PREDICATE_RESULT:0 is clear */
*cs++ = MI_BATCH_BUFFER_END | BIT(15);
*cs++ = MI_SET_PREDICATE | MI_SET_PREDICATE_DISABLE;
/* Instructions are no longer predicated (disabled), we can proceed */
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT | (4 - 2);
*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
*cs++ = 0;
*cs++ = 1; /* enable predication before the next BB */
*cs++ = MI_BATCH_BUFFER_END;
GEM_BUG_ON(offset_in_page(cs) > DG2_PREDICATE_RESULT_WA);
return cs;
}
static struct i915_vma *
__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 context_size;
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
if (GRAPHICS_VER(engine->i915) >= 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
context_size += PAGE_SIZE;
}
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
context_size += PARENT_SCRATCH_SIZE;
}
obj = i915_gem_object_create_lmem(engine->i915, context_size,
I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj)) {
obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
/*
* Wa_22016122933: For Media version 13.0, all Media GT shared
* memory needs to be mapped as WC on CPU side and UC (PAT
* index 2) on GPU side.
*/
if (intel_gt_needs_wa_22016122933(engine->gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
return vma;
}
static struct intel_timeline *
pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
{
struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
}
int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
{
struct intel_ring *ring;
struct i915_vma *vma;
int err;
GEM_BUG_ON(ce->state);
vma = __lrc_alloc_state(ce, engine);
if (IS_ERR(vma))
return PTR_ERR(vma);
ring = intel_engine_create_ring(engine, ce->ring_size);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
goto err_vma;
}
if (!page_mask_bits(ce->timeline)) {
struct intel_timeline *tl;
/*
* Use the static global HWSP for the kernel context, and
* a dynamically allocated cacheline for everyone else.
*/
if (unlikely(ce->timeline))
tl = pinned_timeline(ce, engine);
else
tl = intel_timeline_create(engine->gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto err_ring;
}
ce->timeline = tl;
}
ce->ring = ring;
ce->state = vma;
return 0;
err_ring:
intel_ring_put(ring);
err_vma:
i915_vma_put(vma);
return err;
}
void lrc_reset(struct intel_context *ce)
{
GEM_BUG_ON(!intel_context_is_pinned(ce));
intel_ring_reset(ce->ring, ce->ring->emit);
/* Scrub away the garbage */
lrc_init_regs(ce, ce->engine, true);
ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
}
int
lrc_pre_pin(struct intel_context *ce,
struct intel_engine_cs *engine,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
GEM_BUG_ON(!ce->state);
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
*vaddr = i915_gem_object_pin_map(ce->state->obj,
intel_gt_coherent_map_type(ce->engine->gt,
ce->state->obj,
false) |
I915_MAP_OVERRIDE);
return PTR_ERR_OR_ZERO(*vaddr);
}
int
lrc_pin(struct intel_context *ce,
struct intel_engine_cs *engine,
void *vaddr)
{
ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
lrc_init_state(ce, engine, vaddr);
ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
return 0;
}
void lrc_unpin(struct intel_context *ce)
{
if (unlikely(ce->parallel.last_rq)) {
i915_request_put(ce->parallel.last_rq);
ce->parallel.last_rq = NULL;
}
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
ce->engine);
}
void lrc_post_unpin(struct intel_context *ce)
{
i915_gem_object_unpin_map(ce->state->obj);
}
void lrc_fini(struct intel_context *ce)
{
if (!ce->state)
return;
intel_ring_put(fetch_and_zero(&ce->ring));
i915_vma_put(fetch_and_zero(&ce->state));
}
void lrc_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
lrc_fini(ce);
intel_context_fini(ce);
intel_context_free(ce);
}
static u32 *
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
{
*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
CTX_TIMESTAMP * sizeof(u32);
*cs++ = 0;
*cs++ = MI_LOAD_REGISTER_REG |
MI_LRR_SOURCE_CS_MMIO |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
*cs++ = MI_LOAD_REGISTER_REG |
MI_LRR_SOURCE_CS_MMIO |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
return cs;
}
static u32 *
gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
{
GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
(lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
*cs++ = 0;
return cs;
}
static u32 *
gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
{
GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
(lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
*cs++ = 0;
*cs++ = MI_LOAD_REGISTER_REG |
MI_LRR_SOURCE_CS_MMIO |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
*cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
return cs;
}
/*
* On DG2 during context restore of a preempted context in GPGPU mode,
* an RCS restore hang is detected. This is extremely timing dependent.
* To address this, the sw wabb below is implemented for DG2 A steppings.
*/
static u32 *
dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
{
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG(ce->engine->mmio_base));
*cs++ = 0x21;
*cs++ = MI_LOAD_REGISTER_REG;
*cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
*cs++ = i915_mmio_reg_offset(XEHP_CULLBIT1);
*cs++ = MI_LOAD_REGISTER_REG;
*cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
*cs++ = i915_mmio_reg_offset(XEHP_CULLBIT2);
return cs;
}
/*
* The bspec's tuning guide asks us to program a vertical watermark value of
* 0x3FF. However this register is not saved/restored properly by the
* hardware, so we're required to apply the desired value via INDIRECT_CTX
* batch buffer to ensure the value takes effect properly. All other bits
* in this register should remain at 0 (the hardware default).
*/
static u32 *
dg2_emit_draw_watermark_setting(u32 *cs)
{
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(DRAW_WATERMARK);
*cs++ = REG_FIELD_PREP(VERT_WM_VAL, 0x3FF);
return cs;
}
static u32 *
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
{
cs = gen12_emit_timestamp_wa(ce, cs);
cs = gen12_emit_cmd_buf_wa(ce, cs);
cs = gen12_emit_restore_scratch(ce, cs);
/* Wa_22011450934:dg2 */
if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(ce->engine->i915, G11, STEP_A0, STEP_B0))
cs = dg2_emit_rcs_hang_wabb(ce, cs);
/* Wa_16013000631:dg2 */
if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_G11(ce->engine->i915))
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
cs = gen12_emit_aux_table_inv(ce->engine, cs);
/* Wa_16014892111 */
if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(ce->engine->i915, P, STEP_A0, STEP_B0) ||
IS_DG2(ce->engine->i915))
cs = dg2_emit_draw_watermark_setting(cs);
return cs;
}
static u32 *
gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
{
cs = gen12_emit_timestamp_wa(ce, cs);
cs = gen12_emit_restore_scratch(ce, cs);
/* Wa_16013000631:dg2 */
if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_G11(ce->engine->i915))
if (ce->engine->class == COMPUTE_CLASS)
cs = gen8_emit_pipe_control(cs,
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
0);
return gen12_emit_aux_table_inv(ce->engine, cs);
}
static void
setup_indirect_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
u32 * const start = context_indirect_bb(ce);
u32 *cs;
cs = emit(ce, start);
GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
while ((unsigned long)cs % CACHELINE_BYTES)
*cs++ = MI_NOOP;
GEM_BUG_ON(cs - start > DG2_PREDICATE_RESULT_BB / sizeof(*start));
setup_predicate_disable_wa(ce, start + DG2_PREDICATE_RESULT_BB / sizeof(*start));
lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
lrc_indirect_bb(ce),
(cs - start) * sizeof(*cs));
}
/*
* The context descriptor encodes various attributes of a context,
* including its GTT address and some flags. Because it's fairly
* expensive to calculate, we'll just do it once and cache the result,
* which remains valid until the context is unpinned.
*
* This is what a descriptor looks like, from LSB to MSB::
*
* bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
* bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
* bits 53-54: mbz, reserved for use by hardware
* bits 55-63: group ID, currently unused and set to 0
*
* Starting from Gen11, the upper dword of the descriptor has a new format:
*
* bits 32-36: reserved
* bits 37-47: SW context ID
* bits 48-53: engine instance
* bit 54: mbz, reserved for use by hardware
* bits 55-60: SW counter
* bits 61-63: engine class
*
* On Xe_HP, the upper dword of the descriptor has a new format:
*
* bits 32-37: virtual function number
* bit 38: mbz, reserved for use by hardware
* bits 39-54: SW context ID
* bits 55-57: reserved
* bits 58-63: SW counter
*
* engine info, SW context ID and SW counter need to form a unique number
* (Context ID) per lrc.
*/
static u32 lrc_descriptor(const struct intel_context *ce)
{
u32 desc;
desc = INTEL_LEGACY_32B_CONTEXT;
if (i915_vm_is_4lvl(ce->vm))
desc = INTEL_LEGACY_64B_CONTEXT;
desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
if (GRAPHICS_VER(ce->vm->i915) == 8)
desc |= GEN8_CTX_L3LLC_COHERENT;
return i915_ggtt_offset(ce->state) | desc;
}
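/*
 * Illustrative sketch (not part of the driver) of how the lower-dword packing
 * described above works out in practice. Because the context state object is
 * page aligned in the GGTT, the low 12 bits of i915_ggtt_offset(ce->state)
 * are zero, which is what allows the GEN8_CTX_* flags to share the same u32
 * with the LRCA. Assuming a hypothetical context image pinned at GGTT offset
 * 0x00123000 on a 4-level (64b) VM:
 *
 *   desc = INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 *   desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 *   desc |= 0x00123000;	/- LRCA occupies bits 12-31 -/
 *
 * The exact bit positions of the individual flags are defined by the
 * GEN8_CTX_* macros and are not restated here.
 */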
u32 lrc_update_regs(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 head)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD] = head;
regs[CTX_RING_TAIL] = ring->tail;
regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
/* RPCS */
if (engine->class == RENDER_CLASS) {
regs[CTX_R_PWR_CLK_STATE] =
intel_sseu_make_rpcs(engine->gt, &ce->sseu);
i915_oa_init_reg_state(ce, engine);
}
if (ce->wa_bb_page) {
u32 *(*fn)(const struct intel_context *ce, u32 *cs);
fn = gen12_emit_indirect_ctx_xcs;
if (ce->engine->class == RENDER_CLASS)
fn = gen12_emit_indirect_ctx_rcs;
/* Mutually exclusive with the global indirect bb */
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
}
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
}
void lrc_update_offsets(struct intel_context *ce,
struct intel_engine_cs *engine)
{
set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
}
void lrc_check_regs(const struct intel_context *ce,
const struct intel_engine_cs *engine,
const char *when)
{
const struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
bool valid = true;
int x;
if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
engine->name,
regs[CTX_RING_START],
i915_ggtt_offset(ring->vma));
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
valid = false;
}
if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
(RING_CTL_SIZE(ring->size) | RING_VALID)) {
pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
engine->name,
regs[CTX_RING_CTL],
(u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
valid = false;
}
x = lrc_ring_mi_mode(engine);
if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
engine->name, regs[x + 1]);
regs[x + 1] &= ~STOP_RING;
regs[x + 1] |= STOP_RING << 16;
valid = false;
}
WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
}
/*
* In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
* PIPE_CONTROL instruction. This is required for the flush to happen correctly,
* but there is a slight complication as this is applied in a WA batch where the
* values are only initialized once, so we cannot take the register value at the
* beginning and reuse it further; hence we save its value to memory, upload a
* constant value with bit 21 set and then restore the saved value.
* To simplify the WA, a constant value is formed by using the default value
* of this register. This shouldn't be a problem because we are only modifying
* it for a short period and this batch is non-preemptible. We could of course
* use additional instructions that read the actual value of the register
* at that time and set our bit of interest, but that makes the WA complicated.
*
* This WA is also required for Gen9 so extracting as a function avoids
* code duplication.
*/
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
/* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
*batch++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
batch = gen8_emit_pipe_control(batch,
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_DC_FLUSH_ENABLE,
0);
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
*batch++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
return batch;
}
/*
* Typically we only have one indirect_ctx and one per_ctx batch buffer which
* are initialized at the beginning and shared across all contexts but this
* field helps us to have multiple batches at different offsets and select them
* based on a criteria. At the moment this batch always starts at the beginning
* of the page and at this point we don't have multiple wa_ctx batch buffers.
*
* The number of WAs applied is not known at the beginning; we use this field
* to return the number of DWORDs written.
*
* Note that this batch does not contain MI_BATCH_BUFFER_END, so NOOPs are
* added as padding to make it cacheline aligned.
* MI_BATCH_BUFFER_END will be added to the per_ctx batch and both of them
* together make a complete batch buffer.
*/
static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
/* WaDisableCtxRestoreArbitration:bdw,chv */
*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
if (IS_BROADWELL(engine->i915))
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
/* Actual scratch location is at 128 bytes offset */
batch = gen8_emit_pipe_control(batch,
PIPE_CONTROL_FLUSH_L3 |
PIPE_CONTROL_STORE_DATA_INDEX |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
LRC_PPHWSP_SCRATCH_ADDR);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
/* Pad to end of cacheline */
while ((unsigned long)batch % CACHELINE_BYTES)
*batch++ = MI_NOOP;
/*
* MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
* execution depends on the length specified in terms of cache lines
* in the register CTX_RCS_INDIRECT_CTX
*/
return batch;
}
struct lri {
i915_reg_t reg;
u32 value;
};
static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
{
GEM_BUG_ON(!count || count > 63);
*batch++ = MI_LOAD_REGISTER_IMM(count);
do {
*batch++ = i915_mmio_reg_offset(lri->reg);
*batch++ = lri->value;
} while (lri++, --count);
*batch++ = MI_NOOP;
return batch;
}
static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
static const struct lri lri[] = {
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
{
COMMON_SLICE_CHICKEN2,
__MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
0),
},
/* BSpec: 11391 */
{
FF_SLICE_CHICKEN,
__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
},
/* BSpec: 11299 */
{
_3D_CHICKEN3,
__MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
}
};
*batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
/* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
batch = gen8_emit_pipe_control(batch,
PIPE_CONTROL_FLUSH_L3 |
PIPE_CONTROL_STORE_DATA_INDEX |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
LRC_PPHWSP_SCRATCH_ADDR);
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
/* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
* EU pool configuration is setup along with golden context
* during context initialization. This value depends on
* device type (2x6 or 3x6) and needs to be updated based
* on which subslice is disabled especially for 2x6
* devices, however it is safe to load default
* configuration of 3x6 device instead of masking off
* corresponding bits because HW ignores bits of a disabled
* subslice and drops down to appropriate config. Please
* see render_state_setup() in i915_gem_render_state.c for
* possible configurations, to avoid duplication they are
* not shown here again.
*/
*batch++ = GEN9_MEDIA_POOL_STATE;
*batch++ = GEN9_MEDIA_POOL_ENABLE;
*batch++ = 0x00777000;
*batch++ = 0;
*batch++ = 0;
*batch++ = 0;
}
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
/* Pad to end of cacheline */
while ((unsigned long)batch % CACHELINE_BYTES)
*batch++ = MI_NOOP;
return batch;
}
#define CTX_WA_BB_SIZE (PAGE_SIZE)
static int lrc_create_wa_ctx(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}
engine->wa_ctx.vma = vma;
return 0;
err:
i915_gem_object_put(obj);
return err;
}
void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
}
typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
void lrc_init_wa_ctx(struct intel_engine_cs *engine)
{
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
struct i915_wa_ctx_bb *wa_bb[] = {
&wa_ctx->indirect_ctx, &wa_ctx->per_ctx
};
wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
struct i915_gem_ww_ctx ww;
void *batch, *batch_ptr;
unsigned int i;
int err;
if (GRAPHICS_VER(engine->i915) >= 11 ||
!(engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
return;
if (GRAPHICS_VER(engine->i915) == 9) {
wa_bb_fn[0] = gen9_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
} else if (GRAPHICS_VER(engine->i915) == 8) {
wa_bb_fn[0] = gen8_init_indirectctx_bb;
wa_bb_fn[1] = NULL;
}
err = lrc_create_wa_ctx(engine);
if (err) {
/*
* We continue even if we fail to initialize WA batch
* because we only expect rare glitches but nothing
* critical to prevent us from using GPU
*/
drm_err(&engine->i915->drm,
"Ignoring context switch w/a allocation error:%d\n",
err);
return;
}
if (!engine->wa_ctx.vma)
return;
i915_gem_ww_ctx_init(&ww, true);
retry:
err = i915_gem_object_lock(wa_ctx->vma->obj, &ww);
if (!err)
err = i915_ggtt_pin(wa_ctx->vma, &ww, 0, PIN_HIGH);
if (err)
goto err;
batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_unpin;
}
/*
* Emit the two workaround batch buffers, recording the offset from the
* start of the workaround batch buffer object for each and their
* respective sizes.
*/
batch_ptr = batch;
for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
wa_bb[i]->offset = batch_ptr - batch;
if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
CACHELINE_BYTES))) {
err = -EINVAL;
break;
}
if (wa_bb_fn[i])
batch_ptr = wa_bb_fn[i](engine, batch_ptr);
wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
__i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
__i915_gem_object_release_map(wa_ctx->vma->obj);
/* Verify that we can handle failure to setup the wa_ctx */
if (!err)
err = i915_inject_probe_error(engine->i915, -ENODEV);
err_unpin:
if (err)
i915_vma_unpin(wa_ctx->vma);
err:
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
if (err) {
i915_vma_put(engine->wa_ctx.vma);
/* Clear all flags to prevent further use */
memset(wa_ctx, 0, sizeof(*wa_ctx));
}
}
static void st_runtime_underflow(struct intel_context_stats *stats, s32 dt)
{
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
stats->runtime.num_underflow++;
stats->runtime.max_underflow =
max_t(u32, stats->runtime.max_underflow, -dt);
#endif
}
static u32 lrc_get_runtime(const struct intel_context *ce)
{
/*
* We can use either ppHWSP[16] which is recorded before the context
* switch (and so excludes the cost of context switches) or use the
* value from the context image itself, which is saved/restored earlier
* and so includes the cost of the save.
*/
return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
}
void lrc_update_runtime(struct intel_context *ce)
{
struct intel_context_stats *stats = &ce->stats;
u32 old;
s32 dt;
old = stats->runtime.last;
stats->runtime.last = lrc_get_runtime(ce);
dt = stats->runtime.last - old;
if (!dt)
return;
if (unlikely(dt < 0)) {
CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
old, stats->runtime.last, dt);
st_runtime_underflow(stats, dt);
return;
}
ewma_runtime_add(&stats->runtime.avg, dt);
stats->runtime.total += dt;
}
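/*
 * Worked example (illustrative only, not part of the driver) of the u32
 * arithmetic above across a CTX_TIMESTAMP wraparound, with hypothetical
 * values:
 *
 *   old = 0xfffffff0;
 *   stats->runtime.last = 0x00000010;
 *   dt = (s32)(0x00000010 - 0xfffffff0);	/- 0x20, i.e. 32 ticks -/
 *
 * The unsigned subtraction wraps modulo 2^32, so a timestamp that rolled over
 * still produces a small positive delta. A negative dt can therefore only
 * come from a context image whose timestamp went backwards, which is what the
 * underflow branch above reports.
 */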
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_lrc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
#include "intel_renderstate.h"
static const u32 gen7_null_state_relocs[] = {
0x0000000c,
0x00000010,
0x00000018,
0x000001ec,
-1,
};
static const u32 gen7_null_state_batch[] = {
0x69040000,
0x61010008,
0x00000000,
0x00000001, /* reloc */
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001,
0x790d0002,
0x00000000,
0x00000000,
0x00000000,
0x78180000,
0x00000001,
0x79160000,
0x00000008,
0x78300000,
0x02010040,
0x78310000,
0x04000000,
0x78320000,
0x04000000,
0x78330000,
0x02000000,
0x78100004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x781d0004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78210000,
0x00000000,
0x78130005,
0x00000000,
0x20000000,
0x04000000,
0x00000000,
0x00000000,
0x00000000,
0x78140001,
0x20000800,
0x00000000,
0x781e0001,
0x00000000,
0x00000000,
0x78050005,
0xe0040000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000000,
0x78240000,
0x00000240,
0x78230000,
0x00000260,
0x782f0000,
0x00000280,
0x781f000c,
0x00400810,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78200006,
0x000002c0,
0x08080000,
0x00000000,
0x28000402,
0x00060000,
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11230000,
0x02f60004,
0x11230000,
0x78080003,
0x00006008,
0x00000340, /* reloc */
0xffffffff,
0x00000000,
0x782a0000,
0x00000360,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x7b000005,
0x0000000f,
0x00000003,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000031, /* state start */
0x00000003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000492,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0080005a,
0x2e2077bd,
0x000000c0,
0x008d0040,
0x0080005a,
0x2e6077bd,
0x000000d0,
0x008d0040,
0x02800031,
0x21801fa9,
0x008d0e20,
0x08840001,
0x00800001,
0x2e2003bd,
0x008d0180,
0x00000000,
0x00800001,
0x2e6003bd,
0x008d01c0,
0x00000000,
0x00800001,
0x2ea003bd,
0x008d0200,
0x00000000,
0x00800001,
0x2ee003bd,
0x008d0240,
0x00000000,
0x05800031,
0x20001fa8,
0x008d0e20,
0x90031000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000380,
0x000003a0,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
RO_RENDERSTATE(7);
| linux-master | drivers/gpu/drm/i915/gt/gen7_renderstate.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
/**
* DOC: GT Multicast/Replicated (MCR) Register Support
*
* Some GT registers are designed as "multicast" or "replicated" registers:
* multiple instances of the same register share a single MMIO offset. MCR
* registers are generally used when the hardware needs to potentially track
* independent values of a register per hardware unit (e.g., per-subslice,
* per-L3bank, etc.). The specific types of replication that exist vary
* per-platform.
*
* MMIO accesses to MCR registers are controlled according to the settings
* programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
* registers can be done in either a multicast fashion (i.e., a single write
* updates all instances of the register to the same value) or a unicast fashion
* (a write updates only one specific instance). Reads of MCR registers always operate in a unicast
* manner regardless of how the multicast/unicast bit is set in MCR_SELECTOR.
* Selection of a specific MCR instance for unicast operations is referred to
* as "steering."
*
* If MCR register operations are steered toward a hardware unit that is
* fused off or currently powered down due to power gating, the MMIO operation
* is "terminated" by the hardware. Terminated read operations will return a
* value of zero and terminated unicast write operations will be silently
* ignored.
*/
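/*
 * Minimal usage sketch (illustrative only, not part of the driver), using the
 * helpers defined in this file with a hypothetical MCR register "reg":
 *
 *   intel_gt_mcr_multicast_write(gt, reg, value);	 update all instances
 *   val = intel_gt_mcr_read(gt, reg, group, instance);	 one steered instance
 *   val = intel_gt_mcr_read_any(gt, reg);	 any non-terminated instance
 *
 * These helpers take care of forcewake and of gt->mcr_lock internally; the
 * _fw variants below exist for callers that already hold both.
 */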
#define HAS_MSLICE_STEERING(i915) (INTEL_INFO(i915)->has_mslice_steering)
static const char * const intel_steering_types[] = {
"L3BANK",
"MSLICE",
"LNCF",
"GAM",
"DSS",
"OADDRM",
"INSTANCE 0",
};
static const struct intel_mmio_range icl_l3bank_steering_table[] = {
{ 0x00B100, 0x00B3FF },
{},
};
/*
* Although the bspec lists more "MSLICE" ranges than shown here, some of those
* are of a "GAM" subclass that has special rules. Thus we use a separate
* GAM table farther down for those.
*/
static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
{ 0x00DD00, 0x00DDFF },
{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
{},
};
static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
{ 0x004000, 0x004AFF },
{ 0x00C800, 0x00CFFF },
{},
};
static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
{ 0x00B000, 0x00B0FF },
{ 0x00D800, 0x00D8FF },
{},
};
static const struct intel_mmio_range dg2_lncf_steering_table[] = {
{ 0x00B000, 0x00B0FF },
{ 0x00D880, 0x00D8FF },
{},
};
/*
* We have several types of MCR registers on PVC where steering to (0,0)
* will always provide us with a non-terminated value. We'll stick them
* all in the same table for simplicity.
*/
static const struct intel_mmio_range pvc_instance0_steering_table[] = {
{ 0x004000, 0x004AFF }, /* HALF-BSLICE */
{ 0x008800, 0x00887F }, /* CC */
{ 0x008A80, 0x008AFF }, /* TILEPSMI */
{ 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
{ 0x00B100, 0x00B3FF }, /* L3BANK */
{ 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
{ 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
{ 0x00DD00, 0x00DDFF }, /* BSLICE */
{ 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
{ 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
{ 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
{ 0x024180, 0x0241FF }, /* HALF-BSLICE */
{},
};
static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
{ 0x000B00, 0x000BFF }, /* SQIDI */
{ 0x001000, 0x001FFF }, /* SQIDI */
{ 0x004000, 0x0048FF }, /* GAM */
{ 0x008700, 0x0087FF }, /* SQIDI */
{ 0x00B000, 0x00B0FF }, /* NODE */
{ 0x00C800, 0x00CFFF }, /* GAM */
{ 0x00D880, 0x00D8FF }, /* NODE */
{ 0x00DD00, 0x00DDFF }, /* OAAL2 */
{},
};
static const struct intel_mmio_range xelpg_l3bank_steering_table[] = {
{ 0x00B100, 0x00B3FF },
{},
};
/* DSS steering is used for SLICE ranges as well */
static const struct intel_mmio_range xelpg_dss_steering_table[] = {
{ 0x005200, 0x0052FF }, /* SLICE */
{ 0x005500, 0x007FFF }, /* SLICE */
{ 0x008140, 0x00815F }, /* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
{ 0x0094D0, 0x00955F }, /* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
{ 0x009680, 0x0096FF }, /* DSS */
{ 0x00D800, 0x00D87F }, /* SLICE */
{ 0x00DC00, 0x00DCFF }, /* SLICE */
{ 0x00DE80, 0x00E8FF }, /* DSS (0xE000-0xE0FF reserved) */
{},
};
static const struct intel_mmio_range xelpmp_oaddrm_steering_table[] = {
{ 0x393200, 0x39323F },
{ 0x393400, 0x3934FF },
{},
};
void intel_gt_mcr_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
unsigned long fuse;
int i;
spin_lock_init(>->mcr_lock);
/*
* An mslice is unavailable only if both the meml3 for the slice is
* disabled *and* all of the DSS in the slice (quadrant) are disabled.
*/
if (HAS_MSLICE_STEERING(i915)) {
gt->info.mslice_mask =
intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
GEN_DSS_PER_MSLICE);
gt->info.mslice_mask |=
(intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
GEN12_MEML3_EN_MASK);
if (!gt->info.mslice_mask) /* should be impossible! */
gt_warn(gt, "mslice mask all zero!\n");
}
if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
/* Wa_14016747170 */
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
fuse = REG_FIELD_GET(MTL_GT_L3_EXC_MASK,
intel_uncore_read(gt->uncore,
MTL_GT_ACTIVITY_FACTOR));
else
fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
intel_uncore_read(gt->uncore, XEHP_FUSE4));
/*
* Despite the register field being named "exclude mask" the
* bits actually represent enabled banks (two banks per bit).
*/
for_each_set_bit(i, &fuse, 3)
gt->info.l3bank_mask |= 0x3 << 2 * i;
gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
gt->steering_table[DSS] = xelpg_dss_steering_table;
} else if (IS_PONTEVECCHIO(i915)) {
gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
} else if (IS_DG2(i915)) {
gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
gt->steering_table[LNCF] = dg2_lncf_steering_table;
/*
* No need to hook up the GAM table since it has a dedicated
* steering control register on DG2 and can use implicit
* steering.
*/
} else if (IS_XEHPSDV(i915)) {
gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
gt->steering_table[GAM] = xehpsdv_gam_steering_table;
} else if (GRAPHICS_VER(i915) >= 11 &&
GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
gt->steering_table[L3BANK] = icl_l3bank_steering_table;
gt->info.l3bank_mask =
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
if (!gt->info.l3bank_mask) /* should be impossible! */
gt_warn(gt, "L3 bank mask is all zero!\n");
} else if (GRAPHICS_VER(i915) >= 11) {
/*
* We expect all modern platforms to have at least some
* type of steering that needs to be initialized.
*/
MISSING_CASE(INTEL_INFO(i915)->platform);
}
}
/*
* Although the rest of the driver should use MCR-specific functions to
* read/write MCR registers, we still use the regular intel_uncore_* functions
* internally to implement those, so we need a way for the functions in this
* file to "cast" an i915_mcr_reg_t into an i915_reg_t.
*/
static i915_reg_t mcr_reg_cast(const i915_mcr_reg_t mcr)
{
i915_reg_t r = { .reg = mcr.reg };
return r;
}
/*
* rw_with_mcr_steering_fw - Access a register with specific MCR steering
* @gt: GT to read register from
* @reg: register being accessed
* @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
* @group: group number (documented as "sliceid" on older platforms)
* @instance: instance number (documented as "subsliceid" on older platforms)
* @value: register value to be written (ignored for read)
*
* Context: The caller must hold the MCR lock
* Return: 0 for write access. register value for read access.
*
* Caller needs to make sure the relevant forcewake wells are up.
*/
static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
i915_mcr_reg_t reg, u8 rw_flag,
int group, int instance, u32 value)
{
struct intel_uncore *uncore = gt->uncore;
u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
lockdep_assert_held(>->mcr_lock);
if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70)) {
/*
* Always leave the hardware in multicast mode when doing reads
* (see comment about Wa_22013088509 below) and only change it
* to unicast mode when doing writes of a specific instance.
*
* No need to save old steering reg value.
*/
intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR,
REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance) |
(rw_flag == FW_REG_READ ? GEN11_MCR_MULTICAST : 0));
} else if (GRAPHICS_VER(uncore->i915) >= 11) {
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);
/*
* Wa_22013088509
*
* The setting of the multicast/unicast bit usually wouldn't
* matter for read operations (which always return the value
* from a single register instance regardless of how that bit
* is set), but some platforms have a workaround requiring us
* to remain in multicast mode for reads. There's no real
* downside to this, so we'll just go ahead and do so on all
* platforms; we'll only clear the multicast bit from the mask
* when explicitly doing a write operation.
*/
if (rw_flag == FW_REG_WRITE)
mcr_mask |= GEN11_MCR_MULTICAST;
mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
old_mcr = mcr;
mcr &= ~mcr_mask;
mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
} else {
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);
mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
old_mcr = mcr;
mcr &= ~mcr_mask;
mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
}
if (rw_flag == FW_REG_READ)
val = intel_uncore_read_fw(uncore, mcr_reg_cast(reg));
else
intel_uncore_write_fw(uncore, mcr_reg_cast(reg), value);
/*
* For pre-MTL platforms, we need to restore the old value of the
* steering control register to ensure that implicit steering continues
* to behave as expected. For MTL and beyond, we need only reinstate
* the 'multicast' bit (and only if we did a write that cleared it).
*/
if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70) && rw_flag == FW_REG_WRITE)
intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
else if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 70))
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, old_mcr);
return val;
}
static u32 rw_with_mcr_steering(struct intel_gt *gt,
i915_mcr_reg_t reg, u8 rw_flag,
int group, int instance,
u32 value)
{
struct intel_uncore *uncore = gt->uncore;
enum forcewake_domains fw_domains;
unsigned long flags;
u32 val;
fw_domains = intel_uncore_forcewake_for_reg(uncore, mcr_reg_cast(reg),
rw_flag);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
intel_gt_mcr_lock(gt, &flags);
spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
val = rw_with_mcr_steering_fw(gt, reg, rw_flag, group, instance, value);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(gt, flags);
return val;
}
/**
* intel_gt_mcr_lock - Acquire MCR steering lock
* @gt: GT structure
* @flags: storage to save IRQ flags to
*
* Performs locking to protect the steering for the duration of an MCR
* operation. On MTL and beyond, a hardware lock will also be taken to
* serialize access not only for the driver, but also for external hardware and
* firmware agents.
*
* Context: Takes gt->mcr_lock. uncore->lock should *not* be held when this
* function is called, although it may be acquired after this
* function call.
*/
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
__acquires(>->mcr_lock)
{
unsigned long __flags;
int err = 0;
lockdep_assert_not_held(>->uncore->lock);
/*
* Starting with MTL, we need to coordinate not only with other
* driver threads, but also with hardware/firmware agents. A dedicated
* locking register is used.
*/
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
err = wait_for(intel_uncore_read_fw(gt->uncore,
MTL_STEER_SEMAPHORE) == 0x1, 100);
/*
* Even on platforms with a hardware lock, we'll continue to grab
* a software spinlock too for lockdep purposes. If the hardware lock
* was already acquired, there should never be contention on the
* software lock.
*/
spin_lock_irqsave(>->mcr_lock, __flags);
*flags = __flags;
/*
* In theory we should never fail to acquire the HW semaphore; this
* would indicate some hardware/firmware is misbehaving and not
* releasing it properly.
*/
if (err == -ETIMEDOUT) {
gt_err_ratelimited(gt, "hardware MCR steering semaphore timed out");
add_taint_for_CI(gt->i915, TAINT_WARN); /* CI is now unreliable */
}
}
/**
* intel_gt_mcr_unlock - Release MCR steering lock
* @gt: GT structure
* @flags: IRQ flags to restore
*
* Releases the lock acquired by intel_gt_mcr_lock().
*
* Context: Releases gt->mcr_lock
*/
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
__releases(>->mcr_lock)
{
spin_unlock_irqrestore(>->mcr_lock, flags);
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
}
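/*
 * Illustrative pairing of the two functions above (not part of the driver):
 * a caller that wants to batch several steered accesses takes the lock once
 * and then uses the _fw accessors, e.g.
 *
 *   unsigned long flags;
 *
 *   forcewake must already be held for the _fw accessors:
 *   intel_gt_mcr_lock(gt, &flags);
 *   val = intel_gt_mcr_read_any_fw(gt, reg);
 *   intel_gt_mcr_multicast_write_fw(gt, reg, val | bit);
 *   intel_gt_mcr_unlock(gt, flags);
 */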
/**
* intel_gt_mcr_read - read a specific instance of an MCR register
* @gt: GT structure
* @reg: the MCR register to read
* @group: the MCR group
* @instance: the MCR instance
*
* Context: Takes and releases gt->mcr_lock
*
* Returns the value read from an MCR register after steering toward a specific
* group/instance.
*/
u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_mcr_reg_t reg,
int group, int instance)
{
return rw_with_mcr_steering(gt, reg, FW_REG_READ, group, instance, 0);
}
/**
* intel_gt_mcr_unicast_write - write a specific instance of an MCR register
* @gt: GT structure
* @reg: the MCR register to write
* @value: value to write
* @group: the MCR group
* @instance: the MCR instance
*
* Write an MCR register in unicast mode after steering toward a specific
* group/instance.
*
* Context: Calls a function that takes and releases gt->mcr_lock
*/
void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value,
int group, int instance)
{
rw_with_mcr_steering(gt, reg, FW_REG_WRITE, group, instance, value);
}
/**
* intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
* @gt: GT structure
* @reg: the MCR register to write
* @value: value to write
*
* Write an MCR register in multicast mode to update all instances.
*
* Context: Takes and releases gt->mcr_lock
*/
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
i915_mcr_reg_t reg, u32 value)
{
unsigned long flags;
intel_gt_mcr_lock(gt, &flags);
/*
* Ensure we have multicast behavior, just in case some non-i915 agent
* left the hardware in unicast mode.
*/
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
intel_uncore_write(gt->uncore, mcr_reg_cast(reg), value);
intel_gt_mcr_unlock(gt, flags);
}
/**
* intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
* @gt: GT structure
* @reg: the MCR register to write
* @value: value to write
*
* Write an MCR register in multicast mode to update all instances. This
* function assumes the caller is already holding any necessary forcewake
* domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
* be obtained automatically.
*
* Context: The caller must hold gt->mcr_lock.
*/
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value)
{
lockdep_assert_held(>->mcr_lock);
/*
* Ensure we have multicast behavior, just in case some non-i915 agent
* left the hardware in unicast mode.
*/
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
intel_uncore_write_fw(gt->uncore, mcr_reg_cast(reg), value);
}
/**
* intel_gt_mcr_multicast_rmw - Performs a multicast RMW operations
* @gt: GT structure
* @reg: the MCR register to read and write
* @clear: bits to clear during RMW
* @set: bits to set during RMW
*
* Performs a read-modify-write on an MCR register in a multicast manner.
* This operation only makes sense on MCR registers where all instances are
* expected to have the same value. The read will target any non-terminated
* instance and the write will be applied to all instances.
*
* Forcewake and the MCR lock are acquired as needed by the helpers this
* function calls, so the caller does not need to hold them itself.
*
* Context: Calls functions that take and release gt->mcr_lock
*
* Returns the old (unmodified) value read.
*/
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
u32 clear, u32 set)
{
u32 val = intel_gt_mcr_read_any(gt, reg);
intel_gt_mcr_multicast_write(gt, reg, (val & ~clear) | set);
return val;
}
/*
* reg_needs_read_steering - determine whether a register read requires
* explicit steering
* @gt: GT structure
* @reg: the register to check steering requirements for
* @type: type of multicast steering to check
*
* Determines whether @reg needs explicit steering of a specific type for
* reads.
*
* Returns false if @reg does not belong to a register range of the given
* steering type, or if the default (subslice-based) steering IDs are suitable
* for @type steering too.
*/
static bool reg_needs_read_steering(struct intel_gt *gt,
i915_mcr_reg_t reg,
enum intel_steering_type type)
{
u32 offset = i915_mmio_reg_offset(reg);
const struct intel_mmio_range *entry;
if (likely(!gt->steering_table[type]))
return false;
if (IS_GSI_REG(offset))
offset += gt->uncore->gsi_offset;
for (entry = gt->steering_table[type]; entry->end; entry++) {
if (offset >= entry->start && offset <= entry->end)
return true;
}
return false;
}
/*
* get_nonterminated_steering - determines valid IDs for a class of MCR steering
* @gt: GT structure
* @type: multicast register type
* @group: Group ID returned
* @instance: Instance ID returned
*
* Determines group and instance values that will steer reads of the specified
* MCR class to a non-terminated instance.
*/
static void get_nonterminated_steering(struct intel_gt *gt,
enum intel_steering_type type,
u8 *group, u8 *instance)
{
u32 dss;
switch (type) {
case L3BANK:
*group = 0; /* unused */
*instance = __ffs(gt->info.l3bank_mask);
break;
case MSLICE:
GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
*group = __ffs(gt->info.mslice_mask);
*instance = 0; /* unused */
break;
case LNCF:
/*
* An LNCF is always present if its mslice is present, so we
* can safely just steer to LNCF 0 in all cases.
*/
GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
*group = __ffs(gt->info.mslice_mask) << 1;
*instance = 0; /* unused */
break;
case GAM:
*group = IS_DG2(gt->i915) ? 1 : 0;
*instance = 0;
break;
case DSS:
dss = intel_sseu_find_first_xehp_dss(>->info.sseu, 0, 0);
*group = dss / GEN_DSS_PER_GSLICE;
*instance = dss % GEN_DSS_PER_GSLICE;
break;
case INSTANCE0:
/*
* There are a lot of MCR types for which instance (0, 0)
* will always provide a non-terminated value.
*/
*group = 0;
*instance = 0;
break;
case OADDRM:
if ((VDBOX_MASK(gt) | VEBOX_MASK(gt) | gt->info.sfc_mask) & BIT(0))
*group = 0;
else
*group = 1;
*instance = 0;
break;
default:
MISSING_CASE(type);
*group = 0;
*instance = 0;
}
}
/**
* intel_gt_mcr_get_nonterminated_steering - find group/instance values that
* will steer a register to a non-terminated instance
* @gt: GT structure
* @reg: register for which the steering is required
* @group: return variable for group steering
* @instance: return variable for instance steering
*
* This function returns a group/instance pair that is guaranteed to work for
* read steering of the given register. Note that a value will be returned even
* if the register is not replicated and therefore does not actually require
* steering.
*/
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
i915_mcr_reg_t reg,
u8 *group, u8 *instance)
{
int type;
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, group, instance);
return;
}
}
*group = gt->default_steering.groupid;
*instance = gt->default_steering.instanceid;
}
/**
* intel_gt_mcr_read_any_fw - reads one instance of an MCR register
* @gt: GT structure
* @reg: register to read
*
* Reads a GT MCR register. The read will be steered to a non-terminated
* instance (i.e., one that isn't fused off or powered down by power gating).
* This function assumes the caller is already holding any necessary forcewake
* domains; use intel_gt_mcr_read_any() in cases where forcewake should be
* obtained automatically.
*
* Context: The caller must hold gt->mcr_lock.
*
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
{
int type;
u8 group, instance;
lockdep_assert_held(>->mcr_lock);
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
return rw_with_mcr_steering_fw(gt, reg,
FW_REG_READ,
group, instance, 0);
}
}
return intel_uncore_read_fw(gt->uncore, mcr_reg_cast(reg));
}
/**
* intel_gt_mcr_read_any - reads one instance of an MCR register
* @gt: GT structure
* @reg: register to read
*
* Reads a GT MCR register. The read will be steered to a non-terminated
* instance (i.e., one that isn't fused off or powered down by power gating).
*
* Context: Calls a function that takes and releases gt->mcr_lock.
*
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
{
int type;
u8 group, instance;
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
return rw_with_mcr_steering(gt, reg,
FW_REG_READ,
group, instance, 0);
}
}
return intel_uncore_read(gt->uncore, mcr_reg_cast(reg));
}
static void report_steering_type(struct drm_printer *p,
struct intel_gt *gt,
enum intel_steering_type type,
bool dump_table)
{
const struct intel_mmio_range *entry;
u8 group, instance;
BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
if (!gt->steering_table[type]) {
drm_printf(p, "%s steering: uses default steering\n",
intel_steering_types[type]);
return;
}
get_nonterminated_steering(gt, type, &group, &instance);
drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
intel_steering_types[type], group, instance);
if (!dump_table)
return;
for (entry = gt->steering_table[type]; entry->end; entry++)
drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
}
void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
bool dump_table)
{
/*
* Starting with MTL we no longer have default steering;
* all ranges are explicitly steered.
*/
if (GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70))
drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
gt->default_steering.groupid,
gt->default_steering.instanceid);
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
for (int i = 0; i < NUM_STEERING_TYPES; i++)
if (gt->steering_table[i])
report_steering_type(p, gt, i, dump_table);
} else if (IS_PONTEVECCHIO(gt->i915)) {
report_steering_type(p, gt, INSTANCE0, dump_table);
} else if (HAS_MSLICE_STEERING(gt->i915)) {
report_steering_type(p, gt, MSLICE, dump_table);
report_steering_type(p, gt, LNCF, dump_table);
}
}
/**
* intel_gt_mcr_get_ss_steering - returns the group/instance steering for a SS
* @gt: GT structure
* @dss: DSS ID to obtain steering for
* @group: pointer to storage for steering group ID
* @instance: pointer to storage for steering instance ID
*
* Returns the steering IDs (via the @group and @instance parameters) that
* correspond to a specific subslice/DSS ID.
*/
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
unsigned int *group, unsigned int *instance)
{
if (IS_PONTEVECCHIO(gt->i915)) {
*group = dss / GEN_DSS_PER_CSLICE;
*instance = dss % GEN_DSS_PER_CSLICE;
} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
*group = dss / GEN_DSS_PER_GSLICE;
*instance = dss % GEN_DSS_PER_GSLICE;
} else {
*group = dss / GEN_MAX_SS_PER_HSW_SLICE;
*instance = dss % GEN_MAX_SS_PER_HSW_SLICE;
return;
}
}
/**
* intel_gt_mcr_wait_for_reg - wait until MCR register matches expected state
* @gt: GT structure
* @reg: the register to read
* @mask: mask to apply to register value
* @value: value to wait for
* @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
* @slow_timeout_ms: slow timeout in millisecond
*
* This routine waits until the target register @reg contains the expected
* @value after applying the @mask, i.e. it waits until ::
*
* (intel_gt_mcr_read_any_fw(gt, reg) & mask) == value
*
* Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
* For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
* must not be larger than 20,000 microseconds.
*
* This function is basically an MCR-friendly version of
* __intel_wait_for_register_fw(). Generally this function will only be used
* on GAM registers which are a bit special --- although they're MCR registers,
* reads (e.g., waiting for status updates) are always directed to the primary
* instance.
*
* Note that this routine assumes the caller holds forcewake asserted; it is
* not suitable for very long waits.
*
* Context: Calls a function that takes and releases gt->mcr_lock
* Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
i915_mcr_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms)
{
int ret;
lockdep_assert_not_held(>->mcr_lock);
#define done ((intel_gt_mcr_read_any(gt, reg) & mask) == value)
/* Catch any overuse of this function */
might_sleep_if(slow_timeout_ms);
GEM_BUG_ON(fast_timeout_us > 20000);
GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
ret = -ETIMEDOUT;
if (fast_timeout_us && fast_timeout_us <= 20000)
ret = _wait_for_atomic(done, fast_timeout_us, 0);
if (ret && slow_timeout_ms)
ret = wait_for(done, slow_timeout_ms);
return ret;
#undef done
}
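/*
 * Illustrative call (not part of the driver), polling a hypothetical MCR
 * status register until a hypothetical DONE bit is set, with a 100us atomic
 * fast path followed by a 5ms sleeping fallback:
 *
 *   err = intel_gt_mcr_wait_for_reg(gt, STATUS_REG, DONE_BIT, DONE_BIT,
 *				      100, 5);
 *   if (err == -ETIMEDOUT)
 *	...handle the timeout...
 */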
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_mcr.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"
struct kobj_engine {
struct kobject base;
struct intel_engine_cs *engine;
};
static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
return container_of(kobj, struct kobj_engine, base)->engine;
}
static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}
static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);
static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}
static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);
static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}
static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);
static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}
static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
static const char * const vcs_caps[] = {
[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};
static const char * const vecs_caps[] = {
[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};
static ssize_t repr_trim(char *buf, ssize_t len)
{
/* Trim off the trailing space and replace with a newline */
if (len > PAGE_SIZE)
len = PAGE_SIZE;
if (len > 0)
buf[len - 1] = '\n';
return len;
}
static ssize_t
__caps_show(struct intel_engine_cs *engine,
unsigned long caps, char *buf, bool show_unknown)
{
const char * const *repr;
int count, n;
ssize_t len;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
repr = vcs_caps;
count = ARRAY_SIZE(vcs_caps);
break;
case VIDEO_ENHANCEMENT_CLASS:
repr = vecs_caps;
count = ARRAY_SIZE(vecs_caps);
break;
default:
repr = NULL;
count = 0;
break;
}
GEM_BUG_ON(count > BITS_PER_LONG);
len = 0;
for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
if (n >= count || !repr[n]) {
if (GEM_WARN_ON(show_unknown))
len += sysfs_emit_at(buf, len, "[%x] ", n);
} else {
len += sysfs_emit_at(buf, len, "%s ", repr[n]);
}
if (GEM_WARN_ON(len >= PAGE_SIZE))
break;
}
return repr_trim(buf, len);
}
static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return __caps_show(engine, engine->uabi_capabilities, buf, true);
}
static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);
static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}
static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
unsigned long long duration, clamped;
int err;
/*
* When waiting for a request, if it is currently being executed
* on the GPU, we busywait for a short while before sleeping. The
* premise is that most requests are short, and if it is already
* executing then there is a good chance that it will complete
* before we can setup the interrupt handler and go to sleep.
* We try to offset the cost of going to sleep, by first spinning
* on the request -- if it completed in less time than it would take
* to go to sleep, process the interrupt and return to the client,
* then we have saved the client some latency, albeit at the cost
* of spinning on an expensive CPU core.
*
* While we try to avoid waiting at all for a request that is unlikely
* to complete, deciding how long it is worth spinning for is an
* arbitrary decision: trading off power vs latency.
*/
err = kstrtoull(buf, 0, &duration);
if (err)
return err;
clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
if (duration != clamped)
return -EINVAL;
WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
return count;
}
static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}
static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}
static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
unsigned long long duration, clamped;
int err;
/*
* Execlists uses a scheduling quantum (a timeslice) to alternate
* execution between ready-to-run contexts of equal priority. This
* ensures that all users (though only if they are of equal importance)
* have the opportunity to run and prevents livelocks where contexts
* may have implicit ordering due to userspace semaphores.
*/
err = kstrtoull(buf, 0, &duration);
if (err)
return err;
clamped = intel_clamp_timeslice_duration_ms(engine, duration);
if (duration != clamped)
return -EINVAL;
WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
if (execlists_active(&engine->execlists))
set_timer_ms(&engine->execlists.timer, duration);
return count;
}
static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}
static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}
static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
unsigned long long duration, clamped;
int err;
/*
* Allowing ourselves to sleep before a GPU reset after disabling
* submission, even for a few milliseconds, gives an innocent context
* the opportunity to clear the GPU before the reset occurs. However,
* how long to sleep depends on the typical non-preemptible duration
* (a similar problem to determining the ideal preempt-reset timeout
* or even the heartbeat interval).
*/
err = kstrtoull(buf, 0, &duration);
if (err)
return err;
clamped = intel_clamp_stop_timeout_ms(engine, duration);
if (duration != clamped)
return -EINVAL;
WRITE_ONCE(engine->props.stop_timeout_ms, duration);
return count;
}
static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}
static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}
static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
unsigned long long timeout, clamped;
int err;
/*
* After initialising a preemption request, we give the current
* resident a small amount of time to vacate the GPU. The preemption
* request is for a higher priority context and should be immediate to
* maintain high quality of service (and avoid priority inversion).
* However, the preemption granularity of the GPU can be quite coarse
* and so we need a compromise.
*/
err = kstrtoull(buf, 0, &timeout);
if (err)
return err;
clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
if (timeout != clamped)
return -EINVAL;
WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
if (READ_ONCE(engine->execlists.pending[0]))
set_timer_ms(&engine->execlists.preempt, timeout);
return count;
}
static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}
static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}
static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
unsigned long long delay, clamped;
int err;
/*
* We monitor the health of the system via periodic heartbeat pulses.
* The pulses also provide the opportunity to perform garbage
* collection. However, we interpret an incomplete pulse (a missed
* heartbeat) as an indication that the system is no longer responsive,
* i.e. hung, and perform an engine or full GPU reset. Given that the
* preemption granularity can be very coarse on a system, the optimal
* value for any workload is unknowable!
*/
err = kstrtoull(buf, 0, &delay);
if (err)
return err;
clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
if (delay != clamped)
return -EINVAL;
err = intel_engine_set_heartbeat(engine, delay);
if (err)
return err;
return count;
}
static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}
static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_engine_cs *engine = kobj_to_engine(kobj);
return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}
static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
static void kobj_engine_release(struct kobject *kobj)
{
kfree(kobj);
}
static const struct kobj_type kobj_engine_type = {
.release = kobj_engine_release,
.sysfs_ops = &kobj_sysfs_ops
};
static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
struct kobj_engine *ke;
ke = kzalloc(sizeof(*ke), GFP_KERNEL);
if (!ke)
return NULL;
kobject_init(&ke->base, &kobj_engine_type);
ke->engine = engine;
if (kobject_add(&ke->base, dir, "%s", engine->name)) {
kobject_put(&ke->base);
return NULL;
}
/* xfer ownership to sysfs tree */
return &ke->base;
}
static void add_defaults(struct kobj_engine *parent)
{
static const struct attribute * const files[] = {
&max_spin_def.attr,
&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
&heartbeat_interval_def.attr,
#endif
NULL
};
struct kobj_engine *ke;
ke = kzalloc(sizeof(*ke), GFP_KERNEL);
if (!ke)
return;
kobject_init(&ke->base, &kobj_engine_type);
ke->engine = parent->engine;
if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
kobject_put(&ke->base);
return;
}
if (sysfs_create_files(&ke->base, files))
return;
if (intel_engine_has_timeslices(ke->engine) &&
sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
return;
if (intel_engine_has_preempt_reset(ke->engine) &&
sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
return;
}
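/*
 * Expose each uabi engine under <card>/engine/<name>/ with its identity,
 * capability and timeout attributes; failures are reported but do not
 * prevent the driver from loading.
 */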
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
static const struct attribute * const files[] = {
&name_attr.attr,
&class_attr.attr,
&inst_attr.attr,
&mmio_attr.attr,
&caps_attr.attr,
&all_caps_attr.attr,
&max_spin_attr.attr,
&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
&heartbeat_interval_attr.attr,
#endif
NULL
};
struct device *kdev = i915->drm.primary->kdev;
struct intel_engine_cs *engine;
struct kobject *dir;
dir = kobject_create_and_add("engine", &kdev->kobj);
if (!dir)
return;
for_each_uabi_engine(engine, i915) {
struct kobject *kobj;
kobj = kobj_engine(dir, engine);
if (!kobj)
goto err_engine;
if (sysfs_create_files(kobj, files))
goto err_object;
if (intel_engine_has_timeslices(engine) &&
sysfs_create_file(kobj, &timeslice_duration_attr.attr))
goto err_engine;
if (intel_engine_has_preempt_reset(engine) &&
sysfs_create_file(kobj, &preempt_timeout_attr.attr))
goto err_engine;
add_defaults(container_of(kobj, struct kobj_engine, base));
if (0) {
err_object:
kobject_put(kobj);
err_engine:
dev_err(kdev, "Failed to add sysfs engine '%s'\n",
engine->name);
break;
}
}
}
| linux-master | drivers/gpu/drm/i915/gt/sysfs_engines.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/sort.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "selftests/igt_spinner.h"
#include "selftests/i915_random.h"
static const unsigned int sizes[] = {
SZ_4K,
SZ_64K,
SZ_2M,
CHUNK_SZ - SZ_4K,
CHUNK_SZ,
CHUNK_SZ + SZ_4K,
SZ_64M,
};
static struct drm_i915_gem_object *
create_lmem_or_internal(struct drm_i915_private *i915, size_t size)
{
struct drm_i915_gem_object *obj;
obj = i915_gem_object_create_lmem(i915, size, 0);
if (!IS_ERR(obj))
return obj;
return i915_gem_object_create_internal(i915, size);
}
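/*
 * Selftest helper: fill the source with an ascending dword pattern and the
 * destination with its complement, run the supplied copy function inside a
 * ww transaction, then spot-check one random dword per page of the result.
 */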
static int copy(struct intel_migrate *migrate,
int (*fn)(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct i915_request **out),
u32 sz, struct rnd_state *prng)
{
struct drm_i915_private *i915 = migrate->context->engine->i915;
struct drm_i915_gem_object *src, *dst;
struct i915_request *rq;
struct i915_gem_ww_ctx ww;
u32 *vaddr;
int err = 0;
int i;
src = create_lmem_or_internal(i915, sz);
if (IS_ERR(src))
return 0;
sz = src->base.size;
dst = i915_gem_object_create_internal(i915, sz);
if (IS_ERR(dst))
goto err_free_src;
for_i915_gem_ww(&ww, err, true) {
err = i915_gem_object_lock(src, &ww);
if (err)
continue;
err = i915_gem_object_lock(dst, &ww);
if (err)
continue;
vaddr = i915_gem_object_pin_map(src, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
continue;
}
for (i = 0; i < sz / sizeof(u32); i++)
vaddr[i] = i;
i915_gem_object_flush_map(src);
vaddr = i915_gem_object_pin_map(dst, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto unpin_src;
}
for (i = 0; i < sz / sizeof(u32); i++)
vaddr[i] = ~i;
i915_gem_object_flush_map(dst);
err = fn(migrate, &ww, src, dst, &rq);
if (!err)
continue;
if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
pr_err("%ps failed, size: %u\n", fn, sz);
if (rq) {
i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
}
i915_gem_object_unpin_map(dst);
unpin_src:
i915_gem_object_unpin_map(src);
}
if (err)
goto err_out;
if (rq) {
if (i915_request_wait(rq, 0, HZ) < 0) {
pr_err("%ps timed out, size: %u\n", fn, sz);
err = -ETIME;
}
i915_request_put(rq);
}
for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
int x = i * 1024 + i915_prandom_u32_max_state(1024, prng);
if (vaddr[x] != x) {
pr_err("%ps failed, size: %u, offset: %zu\n",
fn, sz, x * sizeof(u32));
igt_hexdump(vaddr + i * 1024, 4096);
err = -EINVAL;
}
}
i915_gem_object_unpin_map(dst);
i915_gem_object_unpin_map(src);
err_out:
i915_gem_object_put(dst);
err_free_src:
i915_gem_object_put(src);
return err;
}
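/*
 * Emit a blit, one CHUNK_SZ window at a time, that moves data between the
 * main surface and its compression control state (CCS), with the direction
 * chosen by @write_to_ccs.
 */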
static int intel_context_copy_ccs(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *sg,
unsigned int pat_index,
bool write_to_ccs,
struct i915_request **out)
{
u8 src_access = write_to_ccs ? DIRECT_ACCESS : INDIRECT_ACCESS;
u8 dst_access = write_to_ccs ? INDIRECT_ACCESS : DIRECT_ACCESS;
struct sgt_dma it = sg_sgt(sg);
struct i915_request *rq;
u32 offset;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
*out = NULL;
GEM_BUG_ON(ce->ring->size < SZ_64K);
offset = 0;
if (HAS_64K_PAGES(ce->engine->i915))
offset = CHUNK_SZ;
do {
int len;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ce;
}
if (deps) {
err = i915_request_await_deps(rq, deps);
if (err)
goto out_rq;
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (err)
goto out_rq;
}
deps = NULL;
}
/* The PTE updates + clear must not be interrupted. */
err = emit_no_arbitration(rq);
if (err)
goto out_rq;
len = emit_pte(rq, &it, pat_index, true, offset, CHUNK_SZ);
if (len <= 0) {
err = len;
goto out_rq;
}
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
err = emit_copy_ccs(rq, offset, dst_access,
offset, src_access, len);
if (err)
goto out_rq;
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
/* Arbitration is re-enabled between requests. */
out_rq:
if (*out)
i915_request_put(*out);
*out = i915_request_get(rq);
i915_request_add(rq);
if (err || !it.sg || !sg_dma_len(it.sg))
break;
cond_resched();
} while (1);
out_ce:
return err;
}
static int
intel_migrate_ccs_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *sg,
unsigned int pat_index,
bool write_to_ccs,
struct i915_request **out)
{
struct intel_context *ce;
int err;
*out = NULL;
if (!m->context)
return -ENODEV;
ce = intel_migrate_create_context(m);
if (IS_ERR(ce))
ce = intel_context_get(m->context);
GEM_BUG_ON(IS_ERR(ce));
err = intel_context_pin_ww(ce, ww);
if (err)
goto out;
err = intel_context_copy_ccs(ce, deps, sg, pat_index,
write_to_ccs, out);
intel_context_unpin(ce);
out:
intel_context_put(ce);
return err;
}
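/*
 * Selftest helper for the clear paths: seed the object (and, with flat CCS
 * on lmem, its CCS surface) with a known pattern, run the supplied clear
 * function, then verify the memory reads back as the requested value and,
 * when clearing to zero, that the CCS data has been cleared as well.
 */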
static int clear(struct intel_migrate *migrate,
int (*fn)(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
struct drm_i915_gem_object *obj,
u32 value,
struct i915_request **out),
u32 sz, struct rnd_state *prng)
{
struct drm_i915_private *i915 = migrate->context->engine->i915;
struct drm_i915_gem_object *obj;
struct i915_request *rq;
struct i915_gem_ww_ctx ww;
u32 *vaddr, val = 0;
bool ccs_cap = false;
int err = 0;
int i;
obj = create_lmem_or_internal(i915, sz);
if (IS_ERR(obj))
return 0;
/* Consider the rounded up memory too */
sz = obj->base.size;
if (HAS_FLAT_CCS(i915) && i915_gem_object_is_lmem(obj))
ccs_cap = true;
for_i915_gem_ww(&ww, err, true) {
int ccs_bytes, ccs_bytes_per_chunk;
err = i915_gem_object_lock(obj, &ww);
if (err)
continue;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
continue;
}
for (i = 0; i < sz / sizeof(u32); i++)
vaddr[i] = ~i;
i915_gem_object_flush_map(obj);
if (ccs_cap && !val) {
/* Write the obj data into ccs surface */
err = intel_migrate_ccs_copy(migrate, &ww, NULL,
obj->mm.pages->sgl,
obj->pat_index,
true, &rq);
if (rq && !err) {
if (i915_request_wait(rq, 0, HZ) < 0) {
pr_err("%ps timed out, size: %u\n",
fn, sz);
err = -ETIME;
}
i915_request_put(rq);
rq = NULL;
}
if (err)
continue;
}
err = fn(migrate, &ww, obj, val, &rq);
if (rq && !err) {
if (i915_request_wait(rq, 0, HZ) < 0) {
pr_err("%ps timed out, size: %u\n", fn, sz);
err = -ETIME;
}
i915_request_put(rq);
rq = NULL;
}
if (err)
continue;
i915_gem_object_flush_map(obj);
/* Verify the set/clear of the obj mem */
for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
int x = i * 1024 +
i915_prandom_u32_max_state(1024, prng);
if (vaddr[x] != val) {
pr_err("%ps failed, (%u != %u), offset: %zu\n",
fn, vaddr[x], val, x * sizeof(u32));
igt_hexdump(vaddr + i * 1024, 4096);
err = -EINVAL;
}
}
if (err)
continue;
if (ccs_cap && !val) {
for (i = 0; i < sz / sizeof(u32); i++)
vaddr[i] = ~i;
i915_gem_object_flush_map(obj);
err = intel_migrate_ccs_copy(migrate, &ww, NULL,
obj->mm.pages->sgl,
obj->pat_index,
false, &rq);
if (rq && !err) {
if (i915_request_wait(rq, 0, HZ) < 0) {
pr_err("%ps timed out, size: %u\n",
fn, sz);
err = -ETIME;
}
i915_request_put(rq);
rq = NULL;
}
if (err)
continue;
ccs_bytes = GET_CCS_BYTES(i915, sz);
ccs_bytes_per_chunk = GET_CCS_BYTES(i915, CHUNK_SZ);
i915_gem_object_flush_map(obj);
for (i = 0; !err && i < DIV_ROUND_UP(ccs_bytes, PAGE_SIZE); i++) {
int offset = ((i * PAGE_SIZE) /
ccs_bytes_per_chunk) * CHUNK_SZ / sizeof(u32);
int ccs_bytes_left = (ccs_bytes - i * PAGE_SIZE) / sizeof(u32);
int x = i915_prandom_u32_max_state(min_t(int, 1024,
ccs_bytes_left), prng);
if (vaddr[offset + x]) {
pr_err("%ps ccs clearing failed, offset: %ld/%d\n",
fn, i * PAGE_SIZE + x * sizeof(u32), ccs_bytes);
igt_hexdump(vaddr + offset,
min_t(int, 4096,
ccs_bytes_left * sizeof(u32)));
err = -EINVAL;
}
}
if (err)
continue;
}
i915_gem_object_unpin_map(obj);
}
if (err) {
if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
pr_err("%ps failed, size: %u\n", fn, sz);
if (rq && err != -EINVAL) {
i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
}
i915_gem_object_unpin_map(obj);
}
i915_gem_object_put(obj);
return err;
}
static int __migrate_copy(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct i915_request **out)
{
return intel_migrate_copy(migrate, ww, NULL,
src->mm.pages->sgl, src->pat_index,
i915_gem_object_is_lmem(src),
dst->mm.pages->sgl, dst->pat_index,
i915_gem_object_is_lmem(dst),
out);
}
static int __global_copy(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
struct drm_i915_gem_object *src,
struct drm_i915_gem_object *dst,
struct i915_request **out)
{
return intel_context_migrate_copy(migrate->context, NULL,
src->mm.pages->sgl, src->pat_index,
i915_gem_object_is_lmem(src),
dst->mm.pages->sgl, dst->pat_index,
i915_gem_object_is_lmem(dst),
out);
}
static int
migrate_copy(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
{
return copy(migrate, __migrate_copy, sz, prng);
}
static int
global_copy(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
{
return copy(migrate, __global_copy, sz, prng);
}
static int __migrate_clear(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
struct drm_i915_gem_object *obj,
u32 value,
struct i915_request **out)
{
return intel_migrate_clear(migrate, ww, NULL,
obj->mm.pages->sgl,
obj->pat_index,
i915_gem_object_is_lmem(obj),
value, out);
}
static int __global_clear(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
struct drm_i915_gem_object *obj,
u32 value,
struct i915_request **out)
{
return intel_context_migrate_clear(migrate->context, NULL,
obj->mm.pages->sgl,
obj->pat_index,
i915_gem_object_is_lmem(obj),
value, out);
}
static int
migrate_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
{
return clear(migrate, __migrate_clear, sz, prng);
}
static int
global_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
{
return clear(migrate, __global_clear, sz, prng);
}
static int live_migrate_copy(void *arg)
{
struct intel_gt *gt = arg;
struct intel_migrate *migrate = &gt->migrate;
struct drm_i915_private *i915 = migrate->context->engine->i915;
I915_RND_STATE(prng);
int i;
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
int err;
err = migrate_copy(migrate, sizes[i], &prng);
if (err == 0)
err = global_copy(migrate, sizes[i], &prng);
i915_gem_drain_freed_objects(i915);
if (err)
return err;
}
return 0;
}
static int live_migrate_clear(void *arg)
{
struct intel_gt *gt = arg;
struct intel_migrate *migrate = &gt->migrate;
struct drm_i915_private *i915 = migrate->context->engine->i915;
I915_RND_STATE(prng);
int i;
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
int err;
err = migrate_clear(migrate, sizes[i], &prng);
if (err == 0)
err = global_clear(migrate, sizes[i], &prng);
i915_gem_drain_freed_objects(i915);
if (err)
return err;
}
return 0;
}
struct spinner_timer {
struct timer_list timer;
struct igt_spinner spin;
};
static void spinner_kill(struct timer_list *timer)
{
struct spinner_timer *st = from_timer(st, timer, timer);
igt_spinner_end(&st->spin);
pr_info("%s\n", __func__);
}
static int live_emit_pte_full_ring(void *arg)
{
struct intel_gt *gt = arg;
struct intel_migrate *migrate = &gt->migrate;
struct drm_i915_private *i915 = migrate->context->engine->i915;
struct drm_i915_gem_object *obj;
struct intel_context *ce;
struct i915_request *rq, *prev;
struct spinner_timer st;
struct sgt_dma it;
int len, sz, err;
u32 *cs;
/*
* Simple regression test to check that we don't trample the
* rq->reserved_space when returning from emit_pte(), if the ring is
* nearly full.
*/
if (igt_spinner_init(&st.spin, to_gt(i915)))
return -ENOMEM;
obj = i915_gem_object_create_internal(i915, 2 * PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out_spinner;
}
err = i915_gem_object_pin_pages_unlocked(obj);
if (err)
goto out_obj;
ce = intel_migrate_create_context(migrate);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out_obj;
}
ce->ring_size = SZ_4K; /* Not too big */
err = intel_context_pin(ce);
if (err)
goto out_put;
rq = igt_spinner_create_request(&st.spin, ce, MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unpin;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&st.spin, rq)) {
err = -EIO;
goto out_unpin;
}
/*
* Fill the rest of the ring leaving I915_EMIT_PTE_NUM_DWORDS +
* ring->reserved_space at the end. To actually emit the PTEs we require
* slightly more than I915_EMIT_PTE_NUM_DWORDS, since our object size is
* greater than PAGE_SIZE. The correct behaviour is to wait for more
* ring space in emit_pte(), otherwise we trample on the reserved_space
* resulting in crashes when later submitting the rq.
*/
prev = NULL;
do {
if (prev)
i915_request_add(rq);
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unpin;
}
sz = (rq->ring->space - rq->reserved_space) / sizeof(u32) -
I915_EMIT_PTE_NUM_DWORDS;
sz = min_t(u32, sz, (SZ_1K - rq->reserved_space) / sizeof(u32) -
I915_EMIT_PTE_NUM_DWORDS);
cs = intel_ring_begin(rq, sz);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto out_rq;
}
memset32(cs, MI_NOOP, sz);
cs += sz;
intel_ring_advance(rq, cs);
pr_info("%s emit=%u sz=%d\n", __func__, rq->ring->emit, sz);
prev = rq;
} while (rq->ring->space > (rq->reserved_space +
I915_EMIT_PTE_NUM_DWORDS * sizeof(u32)));
timer_setup_on_stack(&st.timer, spinner_kill, 0);
mod_timer(&st.timer, jiffies + 2 * HZ);
/*
* This should wait for the spinner to be killed, otherwise we should go
* down in flames when doing i915_request_add().
*/
pr_info("%s emite_pte ring space=%u\n", __func__, rq->ring->space);
it = sg_sgt(obj->mm.pages->sgl);
len = emit_pte(rq, &it, obj->pat_index, false, 0, CHUNK_SZ);
if (!len) {
err = -EINVAL;
goto out_rq;
}
if (len < 0) {
err = len;
goto out_rq;
}
out_rq:
i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
del_timer_sync(&st.timer);
destroy_timer_on_stack(&st.timer);
out_unpin:
intel_context_unpin(ce);
out_put:
intel_context_put(ce);
out_obj:
i915_gem_object_put(obj);
out_spinner:
igt_spinner_fini(&st.spin);
return err;
}
struct threaded_migrate {
struct intel_migrate *migrate;
struct task_struct *tsk;
struct rnd_state prng;
};
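/*
 * Run the given selftest body concurrently on one kthread per online CPU
 * (plus one), give the threads a moment to start, then stop them and
 * report the first error encountered.
 */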
static int threaded_migrate(struct intel_migrate *migrate,
int (*fn)(void *arg),
unsigned int flags)
{
const unsigned int n_cpus = num_online_cpus() + 1;
struct threaded_migrate *thread;
I915_RND_STATE(prng);
unsigned int i;
int err = 0;
thread = kcalloc(n_cpus, sizeof(*thread), GFP_KERNEL);
if (!thread)
return 0;
for (i = 0; i < n_cpus; ++i) {
struct task_struct *tsk;
thread[i].migrate = migrate;
thread[i].prng =
I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
tsk = kthread_run(fn, &thread[i], "igt-%d", i);
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
break;
}
get_task_struct(tsk);
thread[i].tsk = tsk;
}
msleep(10); /* start all threads before we kthread_stop() */
for (i = 0; i < n_cpus; ++i) {
struct task_struct *tsk = thread[i].tsk;
int status;
if (IS_ERR_OR_NULL(tsk))
continue;
status = kthread_stop(tsk);
if (status && !err)
err = status;
put_task_struct(tsk);
}
kfree(thread);
return err;
}
static int __thread_migrate_copy(void *arg)
{
struct threaded_migrate *tm = arg;
return migrate_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
}
static int thread_migrate_copy(void *arg)
{
struct intel_gt *gt = arg;
struct intel_migrate *migrate = &gt->migrate;
return threaded_migrate(migrate, __thread_migrate_copy, 0);
}
static int __thread_global_copy(void *arg)
{
struct threaded_migrate *tm = arg;
return global_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
}
static int thread_global_copy(void *arg)
{
struct intel_gt *gt = arg;
struct intel_migrate *migrate = &gt->migrate;
return threaded_migrate(migrate, __thread_global_copy, 0);
}
static int __thread_migrate_clear(void *arg)
{
struct threaded_migrate *tm = arg;
return migrate_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
}
static int __thread_global_clear(void *arg)
{
struct threaded_migrate *tm = arg;
return global_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
}
static int thread_migrate_clear(void *arg)
{
struct intel_gt *gt = arg;
struct intel_migrate *migrate = &gt->migrate;
return threaded_migrate(migrate, __thread_migrate_clear, 0);
}
static int thread_global_clear(void *arg)
{
struct intel_gt *gt = arg;
struct intel_migrate *migrate = &gt->migrate;
return threaded_migrate(migrate, __thread_global_clear, 0);
}
int intel_migrate_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_migrate_copy),
SUBTEST(live_migrate_clear),
SUBTEST(live_emit_pte_full_ring),
SUBTEST(thread_migrate_copy),
SUBTEST(thread_migrate_clear),
SUBTEST(thread_global_copy),
SUBTEST(thread_global_clear),
};
struct intel_gt *gt = to_gt(i915);
if (!gt->migrate.context)
return 0;
return intel_gt_live_subtests(tests, gt);
}
static struct drm_i915_gem_object *
create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem)
{
struct drm_i915_gem_object *obj = NULL;
int err;
if (try_lmem)
obj = i915_gem_object_create_lmem(gt->i915, sz, 0);
if (IS_ERR_OR_NULL(obj)) {
obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj))
return obj;
}
i915_gem_object_trylock(obj, NULL);
err = i915_gem_object_pin_pages(obj);
if (err) {
i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
return ERR_PTR(err);
}
return obj;
}
static int wrap_ktime_compare(const void *A, const void *B)
{
const ktime_t *a = A, *b = B;
return ktime_compare(*a, *b);
}
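/*
 * Time several blitter clears of the same buffer and report a throughput
 * estimate in MiB/s, using a weighted average of the middle samples to
 * discard the fastest and slowest outliers.
 */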
static int __perf_clear_blt(struct intel_context *ce,
struct scatterlist *sg,
unsigned int pat_index,
bool is_lmem,
size_t sz)
{
ktime_t t[5];
int pass;
int err = 0;
for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
struct i915_request *rq;
ktime_t t0, t1;
t0 = ktime_get();
err = intel_context_migrate_clear(ce, NULL, sg, pat_index,
is_lmem, 0, &rq);
if (rq) {
if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
err = -EIO;
i915_request_put(rq);
}
if (err)
break;
t1 = ktime_get();
t[pass] = ktime_sub(t1, t0);
}
if (err)
return err;
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
pr_info("%s: %zd KiB fill: %lld MiB/s\n",
ce->engine->name, sz >> 10,
div64_u64(mul_u32_u32(4 * sz,
1000 * 1000 * 1000),
t[1] + 2 * t[2] + t[3]) >> 20);
return 0;
}
static int perf_clear_blt(void *arg)
{
struct intel_gt *gt = arg;
static const unsigned long sizes[] = {
SZ_4K,
SZ_64K,
SZ_2M,
SZ_64M
};
int i;
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
struct drm_i915_gem_object *dst;
int err;
dst = create_init_lmem_internal(gt, sizes[i], true);
if (IS_ERR(dst))
return PTR_ERR(dst);
err = __perf_clear_blt(gt->migrate.context,
dst->mm.pages->sgl,
i915_gem_get_pat_index(gt->i915,
I915_CACHE_NONE),
i915_gem_object_is_lmem(dst),
sizes[i]);
i915_gem_object_unlock(dst);
i915_gem_object_put(dst);
if (err)
return err;
}
return 0;
}
static int __perf_copy_blt(struct intel_context *ce,
struct scatterlist *src,
unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
unsigned int dst_pat_index,
bool dst_is_lmem,
size_t sz)
{
ktime_t t[5];
int pass;
int err = 0;
for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
struct i915_request *rq;
ktime_t t0, t1;
t0 = ktime_get();
err = intel_context_migrate_copy(ce, NULL,
src, src_pat_index,
src_is_lmem,
dst, dst_pat_index,
dst_is_lmem,
&rq);
if (rq) {
if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
err = -EIO;
i915_request_put(rq);
}
if (err)
break;
t1 = ktime_get();
t[pass] = ktime_sub(t1, t0);
}
if (err)
return err;
sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
pr_info("%s: %zd KiB copy: %lld MiB/s\n",
ce->engine->name, sz >> 10,
div64_u64(mul_u32_u32(4 * sz,
1000 * 1000 * 1000),
t[1] + 2 * t[2] + t[3]) >> 20);
return 0;
}
static int perf_copy_blt(void *arg)
{
struct intel_gt *gt = arg;
static const unsigned long sizes[] = {
SZ_4K,
SZ_64K,
SZ_2M,
SZ_64M
};
int i;
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
struct drm_i915_gem_object *src, *dst;
size_t sz;
int err;
src = create_init_lmem_internal(gt, sizes[i], true);
if (IS_ERR(src))
return PTR_ERR(src);
sz = src->base.size;
dst = create_init_lmem_internal(gt, sz, false);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_src;
}
err = __perf_copy_blt(gt->migrate.context,
src->mm.pages->sgl,
i915_gem_get_pat_index(gt->i915,
I915_CACHE_NONE),
i915_gem_object_is_lmem(src),
dst->mm.pages->sgl,
i915_gem_get_pat_index(gt->i915,
I915_CACHE_NONE),
i915_gem_object_is_lmem(dst),
sz);
i915_gem_object_unlock(dst);
i915_gem_object_put(dst);
err_src:
i915_gem_object_unlock(src);
i915_gem_object_put(src);
if (err)
return err;
}
return 0;
}
int intel_migrate_perf_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(perf_clear_blt),
SUBTEST(perf_copy_blt),
};
struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
if (!gt->migrate.context)
return 0;
return intel_gt_live_subtests(tests, gt);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_migrate.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <linux/types.h>
#include <linux/stop_machine.h>
#include <drm/drm_managed.h>
#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>
#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_pci_config.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
u64 *end)
{
if (i915_node_color_differs(node, color))
*start += I915_GTT_PAGE_SIZE;
/*
* Also leave a space between the unallocated reserved node after the
* GTT and any objects within the GTT, i.e. we use the color adjustment
* to insert a guard page to prevent prefetches crossing over the
* GTT boundary.
*/
node = list_next_entry(node, node_list);
if (node->color != color)
*end -= I915_GTT_PAGE_SIZE;
}
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->vm.is_ggtt = true;
/* Only VLV supports read-only GGTT mappings */
ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
if (ggtt->mappable_end) {
if (!io_mapping_init_wc(&ggtt->iomap,
ggtt->gmadr.start,
ggtt->mappable_end)) {
ggtt->vm.cleanup(&ggtt->vm);
return -EIO;
}
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
ggtt->mappable_end);
}
intel_ggtt_init_fences(ggtt);
return 0;
}
/**
* i915_ggtt_init_hw - Initialize GGTT hardware
* @i915: i915 device
*/
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
int ret;
/*
* Note that we use page colouring to enforce a guard page at the
* end of the address space. This is required as the CS may prefetch
* beyond the end of the batch buffer, across the page boundary,
* and beyond the end of the GTT if we do not provide a guard.
*/
ret = ggtt_init_hw(to_gt(i915)->ggtt);
if (ret)
return ret;
return 0;
}
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
*
* Suspend the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*/
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
int save_skip_rewrite;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
retry:
i915_gem_drain_freed_objects(vm->i915);
mutex_lock(&vm->mutex);
/*
* Skip rewriting PTE on VMA unbind.
* FIXME: Use an argument to i915_vma_unbind() instead?
*/
save_skip_rewrite = vm->skip_pte_rewrite;
vm->skip_pte_rewrite = true;
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
continue;
/* unlikely to race when GPU is idle, so no worry about slowpath.. */
if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
/*
* No dead objects should appear here, GPU should be
* completely idle, and userspace suspended
*/
i915_gem_object_get(obj);
mutex_unlock(&vm->mutex);
i915_gem_object_lock(obj, NULL);
GEM_WARN_ON(i915_vma_unbind(vma));
i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
vm->skip_pte_rewrite = save_skip_rewrite;
goto retry;
}
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
i915_vma_wait_for_bind(vma);
__i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node);
}
i915_gem_object_unlock(obj);
}
vm->clear_range(vm, 0, vm->total);
vm->skip_pte_rewrite = save_skip_rewrite;
mutex_unlock(&vm->mutex);
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
struct intel_gt *gt;
i915_ggtt_suspend_vm(&ggtt->vm);
ggtt->invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_gt_check_and_clear_faults(gt);
}
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
spin_lock_irq(&uncore->lock);
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
spin_unlock_irq(&uncore->lock);
}
static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
/*
* Note that as an uncached mmio write, this will flush the
* WCB of the writes into the GGTT before it triggers the invalidate.
*/
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
gen8_ggtt_invalidate(ggtt);
if (GRAPHICS_VER(i915) >= 12) {
struct intel_gt *gt;
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_uncore_write_fw(gt->uncore,
GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
} else {
intel_uncore_write_fw(ggtt->vm.gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
}
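/* On MTL the GGTT PTE carries the PAT index in two dedicated bits. */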
static u64 mtl_ggtt_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
WARN_ON_ONCE(addr & ~GEN12_GGTT_PTE_ADDR_MASK);
if (flags & PTE_LM)
pte |= GEN12_GGTT_PTE_LM;
if (pat_index & BIT(0))
pte |= MTL_GGTT_PTE_PAT0;
if (pat_index & BIT(1))
pte |= MTL_GGTT_PTE_PAT1;
return pte;
}
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
if (flags & PTE_LM)
pte |= GEN12_GGTT_PTE_LM;
return pte;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
}
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
unsigned int pat_index,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags));
ggtt->invalidate(ggtt);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
gen8_pte_t __iomem *gte;
gen8_pte_t __iomem *end;
struct sgt_iter iter;
dma_addr_t addr;
/*
* Note that we ignore PTE_READ_ONLY here. The caller must be careful
* not to allow the user to override access to a read only page.
*/
gte = (gen8_pte_t __iomem *)ggtt->gsm;
gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
while (gte < end)
gen8_set_pte(gte++, vm->scratch[0]->encode);
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
gen8_set_pte(gte++, pte_encode | addr);
GEM_BUG_ON(gte > end);
/* Fill the allocated but "unused" space beyond the end of the buffer */
while (gte < end)
gen8_set_pte(gte++, vm->scratch[0]->encode);
/*
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
*/
ggtt->invalidate(ggtt);
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
}
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
unsigned int pat_index,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
iowrite32(vm->pte_encode(addr, pat_index, flags), pte);
ggtt->invalidate(ggtt);
}
/*
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
* reference offsets within the global GTT as well as accessible by the GPU
* through the GMADR mapped BAR (i915->mm.gtt->gtt).
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *gte;
gen6_pte_t __iomem *end;
struct sgt_iter iter;
dma_addr_t addr;
gte = (gen6_pte_t __iomem *)ggtt->gsm;
gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
while (gte < end)
iowrite32(vm->scratch[0]->encode, gte++);
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
iowrite32(vm->pte_encode(addr, pat_index, flags), gte++);
GEM_BUG_ON(gte > end);
/* Fill the allocated but "unused" space beyond the end of the buffer */
while (gte < end)
iowrite32(vm->scratch[0]->encode, gte++);
/*
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
*/
ggtt->invalidate(ggtt);
}
static void nop_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
}
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
/*
* Make sure the internal GAM fifo has been cleared of all GTT
* writes before exiting stop_machine(). This guarantees that
* any aperture accesses waiting to start in another process
* cannot back up behind the GTT writes causing a hang.
* The register can be any arbitrary GAM register.
*/
intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}
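/*
 * Arguments marshalled so a single-page GGTT insert can be replayed under
 * stop_machine() on platforms needing the VT-d serialisation workaround.
 */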
struct insert_page {
struct i915_address_space *vm;
dma_addr_t addr;
u64 offset;
unsigned int pat_index;
};
static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
struct insert_page *arg = _arg;
gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
arg->pat_index, 0);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
}
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
unsigned int pat_index,
u32 unused)
{
struct insert_page arg = { vm, addr, offset, pat_index };
stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}
struct insert_entries {
struct i915_address_space *vm;
struct i915_vma_resource *vma_res;
unsigned int pat_index;
u32 flags;
};
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
gen8_ggtt_insert_entries(arg->vm, arg->vma_res,
arg->pat_index, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
}
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
struct insert_entries arg = { vm, vma_res, pat_index, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
gen6_pte_t scratch_pte, __iomem *gtt_base =
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = vm->scratch[0]->encode;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
}
void intel_ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
u32 pte_flags;
if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
return;
vma_res->bound_flags |= flags;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
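/*
 * Addresses above GUC_GGTT_TOP are not usable by GuC, so reserve that
 * range to keep objects shared with GuC out of it.
 */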
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
u64 size;
int ret;
if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
size = ggtt->vm.total - GUC_GGTT_TOP;
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
PIN_NOEVICT);
if (ret)
drm_dbg(&ggtt->vm.i915->drm,
"Failed to reserve top of GGTT for GuC\n");
return ret;
}
static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
if (drm_mm_node_allocated(&ggtt->uc_fw))
drm_mm_remove_node(&ggtt->uc_fw);
}
static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
ggtt_release_guc_top(ggtt);
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
mutex_destroy(&ggtt->error_mutex);
}
static int init_ggtt(struct i915_ggtt *ggtt)
{
/*
* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently prefetches
* past the end of the object, and we've seen multiple hangs with the
* GPU head pointer stuck in a batchbuffer bound at the last page of the
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
unsigned long hole_start, hole_end;
struct drm_mm_node *entry;
int ret;
/*
* GuC requires all resources that we're sharing with it to be placed in
* non-WOPCM memory. If GuC is not present or not in use we still need a
* small bias as ring wraparound at offset 0 sometimes hangs. No idea
* why.
*/
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
intel_wopcm_guc_size(&ggtt->vm.gt->wopcm));
ret = intel_vgt_balloon(ggtt);
if (ret)
return ret;
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
/*
* Reserve a mappable slot for our lockless error capture.
*
* We strongly prefer taking address 0x0 in order to protect
* other critical buffers against accidental overwrites,
* as writing to address 0 is a very common mistake.
*
* Since 0 may already be in use by the system (e.g. the BIOS
* framebuffer), we let the reservation fail quietly and hope
* 0 remains reserved always.
*
* If we fail to reserve 0, and then fail to find any space
* for an error-capture, remain silent. We can afford not
* to reserve an error_capture node as we have fallback
* paths, and we trust that 0 will remain reserved. However,
* the only likely reason for failure to insert is a driver
* bug, which we expect to cause other failures...
*
* Since CPU can perform speculative reads on error capture
* (write-combining allows it) add scratch page after error
* capture to avoid DMAR errors.
*/
ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE;
ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
drm_mm_insert_node_in_range(&ggtt->vm.mm,
&ggtt->error_capture,
ggtt->error_capture.size, 0,
ggtt->error_capture.color,
0, ggtt->mappable_end,
DRM_MM_INSERT_LOW);
}
if (drm_mm_node_allocated(&ggtt->error_capture)) {
u64 start = ggtt->error_capture.start;
u64 size = ggtt->error_capture.size;
ggtt->vm.scratch_range(&ggtt->vm, start, size);
drm_dbg(&ggtt->vm.i915->drm,
"Reserved GGTT:[%llx, %llx] for use by error capture\n",
start, start + size);
}
/*
* The upper portion of the GuC address space has a sizeable hole
* (several MB) that is inaccessible by GuC. Reserve this range within
* GGTT as it can comfortably hold GuC/HuC firmware images.
*/
ret = ggtt_reserve_guc_top(ggtt);
if (ret)
goto err;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
drm_dbg(&ggtt->vm.i915->drm,
"clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
return 0;
err:
cleanup_init_ggtt(ggtt);
return ret;
}
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
u32 pte_flags;
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND)
ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
stash, vma_res, pat_index, flags);
if (flags & I915_VMA_GLOBAL_BIND)
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
vma_res->bound_flags |= flags;
}
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
}
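/*
 * Set up the aliasing PPGTT: preallocate page tables covering the whole
 * GGTT range and redirect the vma bind/unbind hooks so local binds land
 * in the alias while global binds still go through the GGTT.
 */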
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
struct i915_vm_pt_stash stash = {};
struct i915_ppgtt *ppgtt;
int err;
ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
err = -ENODEV;
goto err_ppgtt;
}
err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
if (err)
goto err_ppgtt;
i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
/*
* Note we only pre-allocate as far as the end of the global
* GTT. On 48b / 4-level page-tables, the difference is very,
* very significant! We have to preallocate as GVT/vgpu does
* not like the page directory disappearing.
*/
ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
ggtt->alias = ppgtt;
ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
return 0;
err_stash:
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
i915_vm_put(&ppgtt->vm);
return err;
}
static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
struct i915_ppgtt *ppgtt;
ppgtt = fetch_and_zero(&ggtt->alias);
if (!ppgtt)
return;
i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
}
int i915_init_ggtt(struct drm_i915_private *i915)
{
int ret;
ret = init_ggtt(to_gt(i915)->ggtt);
if (ret)
return ret;
if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
if (ret)
cleanup_init_ggtt(to_gt(i915)->ggtt);
}
return 0;
}
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
flush_workqueue(ggtt->vm.i915->wq);
i915_gem_drain_freed_objects(ggtt->vm.i915);
mutex_lock(&ggtt->vm.mutex);
ggtt->vm.skip_pte_rewrite = true;
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
bool trylock;
trylock = i915_gem_object_trylock(obj, NULL);
WARN_ON(!trylock);
WARN_ON(__i915_vma_unbind(vma));
if (trylock)
i915_gem_object_unlock(obj);
}
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
mutex_destroy(&ggtt->error_mutex);
ggtt_release_guc_top(ggtt);
intel_vgt_deballoon(ggtt);
ggtt->vm.cleanup(&ggtt->vm);
mutex_unlock(&ggtt->vm.mutex);
i915_address_space_fini(&ggtt->vm);
arch_phys_wc_del(ggtt->mtrr);
if (ggtt->iomap.size)
io_mapping_fini(&ggtt->iomap);
}
/**
* i915_ggtt_driver_release - Clean up GGTT hardware initialization
* @i915: i915 device
*/
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
fini_aliasing_ppgtt(ggtt);
intel_ggtt_fini_fences(ggtt);
ggtt_cleanup_hw(ggtt);
}
/**
* i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
* all free objects have been drained.
* @i915: i915 device
*/
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
dma_resv_fini(&ggtt->vm._resv);
}
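/*
 * Decode the GGMS field of the GMCH control register into the size of the
 * GTT in bytes; gen6 encodes it directly in MiB, gen8+ as a power of two.
 */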
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
}
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
#ifdef CONFIG_X86_32
/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
if (bdw_gmch_ctl > 4)
bdw_gmch_ctl = 4;
#endif
return bdw_gmch_ctl << 20;
}
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GGMS_MASK;
if (gmch_ctrl)
return 1 << (20 + gmch_ctrl);
return 0;
}
static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
{
/*
* GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
* GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
*/
GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
}
static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
return gen6_gttmmadr_size(i915) / 2;
}
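/*
 * Map the GGTT page table (GSM) out of the GTTMMADR BAR and prepare the
 * scratch page whose encoded PTE is used to fill unused entries.
 */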
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
phys_addr_t phys_addr;
u32 pte_flags;
int ret;
GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
/*
* On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
* will be dropped. For WC mappings in general we have 64 byte burst
* writes when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
ggtt->gsm = ioremap(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
drm_err(&i915->drm, "Failed to map the ggtt page table\n");
return -ENOMEM;
}
kref_init(&ggtt->vm.resv_ref);
ret = setup_scratch_page(&ggtt->vm);
if (ret) {
drm_err(&i915->drm, "Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(ggtt->gsm);
return ret;
}
pte_flags = 0;
if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
pte_flags |= PTE_LM;
ggtt->vm.scratch[0]->encode =
ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
i915_gem_get_pat_index(i915,
I915_CACHE_NONE),
pte_flags);
return 0;
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
free_scratch(vm);
}
static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
return DEFINE_RES_MEM(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
if (!HAS_LMEM(i915) && !HAS_LMEMBAR_SMEM_STOLEN(i915)) {
if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR))
return -ENXIO;
ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
}
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (IS_CHERRYVIEW(i915))
size = chv_get_total_gtt_size(snb_gmch_ctl);
else
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/*
* Serialize GTT updates with aperture access on BXT if VT-d is on,
* and always on CHV.
*/
if (intel_vm_no_concurrent_access_wa(i915)) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
/*
* Calling stop_machine() version of GGTT update function
* at error capture/reset path will raise lockdep warning.
* Allow calling gen8_ggtt_insert_* directly at reset path
* which is safe from parallel GGTT updates.
*/
ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
ggtt->vm.bind_async_flags =
I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
ggtt->invalidate = guc_ggtt_invalidate;
else
ggtt->invalidate = gen8_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
else
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
return ggtt_probe_common(ggtt, size);
}
/*
* For pre-gen8 platforms pat_index is the same as enum i915_cache_level,
* so the switch-case statements in these PTE encode functions are still valid.
* See translation table LEGACY_CACHELEVEL.
*/
static u64 snb_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
switch (pat_index) {
case I915_CACHE_L3_LLC:
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
break;
default:
MISSING_CASE(pat_index);
}
return pte;
}
static u64 ivb_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
switch (pat_index) {
case I915_CACHE_L3_LLC:
pte |= GEN7_PTE_CACHE_L3_LLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
break;
default:
MISSING_CASE(pat_index);
}
return pte;
}
static u64 byt_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
if (pat_index != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
return pte;
}
static u64 hsw_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
if (pat_index != I915_CACHE_NONE)
pte |= HSW_WB_LLC_AGE3;
return pte;
}
static u64 iris_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
switch (pat_index) {
case I915_CACHE_NONE:
break;
case I915_CACHE_WT:
pte |= HSW_WT_ELLC_LLC_AGE3;
break;
default:
pte |= HSW_WB_ELLC_LLC_AGE3;
break;
}
return pte;
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int size;
u16 snb_gmch_ctl;
if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR))
return -ENXIO;
ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
/*
* 64/512MB is the current min/max we actually know of, but this is
* just a coarse sanity check.
*/
if (ggtt->mappable_end < (64 << 20) ||
ggtt->mappable_end > (512 << 20)) {
drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
&ggtt->mappable_end);
return -ENXIO;
}
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = nop_clear_range;
if (!HAS_FULL_PPGTT(i915))
ggtt->vm.clear_range = gen6_ggtt_clear_range;
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(i915))
ggtt->vm.pte_encode = iris_pte_encode;
else if (IS_HASWELL(i915))
ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(i915))
ggtt->vm.pte_encode = byt_pte_encode;
else if (GRAPHICS_VER(i915) >= 7)
ggtt->vm.pte_encode = ivb_pte_encode;
else
ggtt->vm.pte_encode = snb_pte_encode;
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
return ggtt_probe_common(ggtt, size);
}
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
int ret;
ggtt->vm.gt = gt;
ggtt->vm.i915 = i915;
ggtt->vm.dma = i915->drm.dev;
dma_resv_init(&ggtt->vm._resv);
if (GRAPHICS_VER(i915) >= 8)
ret = gen8_gmch_probe(ggtt);
else if (GRAPHICS_VER(i915) >= 6)
ret = gen6_gmch_probe(ggtt);
else
ret = intel_ggtt_gmch_probe(ggtt);
if (ret) {
dma_resv_fini(&ggtt->vm._resv);
return ret;
}
if ((ggtt->vm.total - 1) >> 32) {
drm_err(&i915->drm,
"We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
ggtt->vm.total >> 20);
ggtt->vm.total = 1ULL << 32;
ggtt->mappable_end =
min_t(u64, ggtt->mappable_end, ggtt->vm.total);
}
if (ggtt->mappable_end > ggtt->vm.total) {
drm_err(&i915->drm,
"mappable aperture extends past end of GGTT,"
" aperture=%pa, total=%llx\n",
&ggtt->mappable_end, ggtt->vm.total);
ggtt->mappable_end = ggtt->vm.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
drm_dbg(&i915->drm, "GMADR size = %lluM\n",
(u64)ggtt->mappable_end >> 20);
drm_dbg(&i915->drm, "DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
return 0;
}
/**
* i915_ggtt_probe_hw - Probe GGTT hardware location
* @i915: i915 device
*/
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
struct intel_gt *gt;
int ret, i;
for_each_gt(gt, i915, i) {
ret = intel_gt_assign_ggtt(gt);
if (ret)
return ret;
}
ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
if (ret)
return ret;
if (i915_vtd_active(i915))
drm_info(&i915->drm, "VT-d active for gfx access\n");
return 0;
}
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt;
ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL);
if (!ggtt)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ggtt->gt_list);
return ggtt;
}
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
if (GRAPHICS_VER(i915) < 6)
return intel_ggtt_gmch_enable_hw(i915);
return 0;
}
/**
* i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
* @vm: The VM to restore the mappings for
*
* Restore the memory mappings for all objects mapped to HW via the GGTT or a
* DPT page table.
*
* Returns %true if restoring the mapping for any object that was in a write
* domain before suspend.
*/
bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
unsigned int was_bound =
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
/*
* Clear the bound flags of the vma resource to allow
* ptes to be repopulated.
*/
vma->resource->bound_flags = 0;
vma->ops->bind_vma(vm, NULL, vma->resource,
obj ? obj->pat_index :
i915_gem_get_pat_index(vm->i915,
I915_CACHE_NONE),
was_bound);
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
}
return write_domain_objs;
}
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct intel_gt *gt;
bool flush;
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_gt_check_and_clear_faults(gt);
flush = i915_ggtt_resume_vm(&ggtt->vm);
if (drm_mm_node_allocated(&ggtt->error_capture))
ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
ggtt->error_capture.size);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
intel_uc_resume_mappings(&gt->uc);
ggtt->invalidate(ggtt);
if (flush)
wbinvd_on_all_cpus();
intel_ggtt_restore_fences(ggtt);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_ggtt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2018 Intel Corporation
*/
#include <linux/sort.h>
#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"
#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"
#define COUNT 5
static int cmp_u32(const void *A, const void *B)
{
const u32 *a = A, *b = B;
return *a - *b;
}
static void perf_begin(struct intel_gt *gt)
{
intel_gt_pm_get(gt);
/* Boost gpufreq to max [waitboost] and keep it fixed */
atomic_inc(&gt->rps.num_waiters);
queue_work(gt->i915->unordered_wq, &gt->rps.work);
flush_work(&gt->rps.work);
}
static int perf_end(struct intel_gt *gt)
{
atomic_dec(&gt->rps.num_waiters);
intel_gt_pm_put(gt);
return igt_flush_test(gt->i915);
}
static i915_reg_t timestamp_reg(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
if (GRAPHICS_VER(i915) == 5 || IS_G4X(i915))
return RING_TIMESTAMP_UDW(engine->mmio_base);
else
return RING_TIMESTAMP(engine->mmio_base);
}
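/*
 * Emit an SRM that copies the engine's CS timestamp register into the
 * request's HWSP, so the CPU can later compute how many GPU clock ticks
 * elapsed between two slots.
 */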
static int write_timestamp(struct i915_request *rq, int slot)
{
struct intel_timeline *tl =
rcu_dereference_protected(rq->timeline,
!i915_request_signaled(rq));
u32 cmd;
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
if (GRAPHICS_VER(rq->i915) >= 8)
cmd++;
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(timestamp_reg(rq->engine));
*cs++ = tl->hwsp_offset + slot * sizeof(u32);
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
static struct i915_vma *create_empty_batch(struct intel_context *ce)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cs;
int err;
obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_put;
}
cs[0] = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_unpin;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err_unpin;
i915_gem_object_unpin_map(obj);
return vma;
err_unpin:
i915_gem_object_unpin_map(obj);
err_put:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
static u32 trifilter(u32 *a)
{
u64 sum;
sort(a, COUNT, sizeof(*a), cmp_u32, NULL);
sum = mul_u32_u32(a[2], 2);
sum += a[1];
sum += a[3];
return sum >> 2;
}
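/*
 * Illustrative sketch (not part of the original selftest): the same
 * trimmed mean written out long-hand. After sorting the COUNT == 5
 * samples, the minimum and maximum are dropped and the median is
 * weighted double, i.e. (a[1] + 2 * a[2] + a[3]) / 4, which is exactly
 * what trifilter() above computes with sort() + (sum >> 2).
 */
static u32 __maybe_unused trifilter_sketch(const u32 *samples)
{
	u32 sorted[COUNT];
	unsigned int i, j;

	for (i = 0; i < COUNT; i++)
		sorted[i] = samples[i];

	/* Tiny insertion sort; sort() with cmp_u32 does the same job above. */
	for (i = 1; i < COUNT; i++) {
		u32 v = sorted[i];

		for (j = i; j > 0 && sorted[j - 1] > v; j--)
			sorted[j] = sorted[j - 1];
		sorted[j] = v;
	}

	return (sorted[1] + 2 * sorted[2] + sorted[3]) >> 2;
}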
static int perf_mi_bb_start(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *batch;
u32 cycles[COUNT];
int i;
if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
continue;
intel_engine_pm_get(engine);
batch = create_empty_batch(ce);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
intel_engine_pm_put(engine);
break;
}
err = i915_vma_sync(batch);
if (err) {
intel_engine_pm_put(engine);
i915_vma_put(batch);
break;
}
for (i = 0; i < ARRAY_SIZE(cycles); i++) {
struct i915_request *rq;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
}
err = write_timestamp(rq, 2);
if (err)
goto out;
err = rq->engine->emit_bb_start(rq,
i915_vma_offset(batch), 8,
0);
if (err)
goto out;
err = write_timestamp(rq, 3);
if (err)
goto out;
out:
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -EIO;
i915_request_put(rq);
if (err)
break;
cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
}
i915_vma_put(batch);
intel_engine_pm_put(engine);
if (err)
break;
pr_info("%s: MI_BB_START cycles: %u\n",
engine->name, trifilter(cycles));
}
if (perf_end(gt))
err = -EIO;
return err;
}
static struct i915_vma *create_nop_batch(struct intel_context *ce)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cs;
int err;
obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
if (IS_ERR(obj))
return ERR_CAST(obj);
cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_put;
}
memset(cs, 0, SZ_64K);
cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_unpin;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err_unpin;
i915_gem_object_unpin_map(obj);
return vma;
err_unpin:
i915_gem_object_unpin_map(obj);
err_put:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
static int perf_mi_noop(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *base, *nop;
u32 cycles[COUNT];
int i;
if (GRAPHICS_VER(engine->i915) < 7 && engine->id != RCS0)
continue;
intel_engine_pm_get(engine);
base = create_empty_batch(ce);
if (IS_ERR(base)) {
err = PTR_ERR(base);
intel_engine_pm_put(engine);
break;
}
err = i915_vma_sync(base);
if (err) {
i915_vma_put(base);
intel_engine_pm_put(engine);
break;
}
nop = create_nop_batch(ce);
if (IS_ERR(nop)) {
err = PTR_ERR(nop);
i915_vma_put(base);
intel_engine_pm_put(engine);
break;
}
err = i915_vma_sync(nop);
if (err) {
i915_vma_put(nop);
i915_vma_put(base);
intel_engine_pm_put(engine);
break;
}
for (i = 0; i < ARRAY_SIZE(cycles); i++) {
struct i915_request *rq;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
break;
}
err = write_timestamp(rq, 2);
if (err)
goto out;
err = rq->engine->emit_bb_start(rq,
i915_vma_offset(base), 8,
0);
if (err)
goto out;
err = write_timestamp(rq, 3);
if (err)
goto out;
err = rq->engine->emit_bb_start(rq,
i915_vma_offset(nop),
i915_vma_size(nop),
0);
if (err)
goto out;
err = write_timestamp(rq, 4);
if (err)
goto out;
out:
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -EIO;
i915_request_put(rq);
if (err)
break;
cycles[i] =
(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
}
i915_vma_put(nop);
i915_vma_put(base);
intel_engine_pm_put(engine);
if (err)
break;
pr_info("%s: 16K MI_NOOP cycles: %u\n",
engine->name, trifilter(cycles));
}
if (perf_end(gt))
err = -EIO;
return err;
}
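/*
 * Illustrative sketch (not part of the original selftest): how the
 * MI_NOOP cost above is isolated. Timestamps 2/3/4 bracket an empty
 * batch and a 64K batch filled with MI_NOOPs within the same request,
 * so subtracting the empty-batch interval removes the fixed
 * MI_BB_START overhead shared by both. Unsigned u32 arithmetic keeps
 * the result correct across a single CS_TIMESTAMP wrap.
 */
static u32 __maybe_unused mi_noop_cycles_sketch(u32 t2, u32 t3, u32 t4)
{
	u32 empty = t3 - t2;	/* MI_BB_START + empty batch */
	u32 loaded = t4 - t3;	/* MI_BB_START + ~16K of MI_NOOP */

	return loaded - empty;	/* cycles attributable to the NOOPs alone */
}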
int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(perf_mi_bb_start),
SUBTEST(perf_mi_noop),
};
if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return intel_gt_live_subtests(tests, to_gt(i915));
}
static int intel_mmio_bases_check(void *arg)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
const struct engine_info *info = &intel_engines[i];
u8 prev = U8_MAX;
for (j = 0; j < MAX_MMIO_BASES; j++) {
u8 ver = info->mmio_bases[j].graphics_ver;
u32 base = info->mmio_bases[j].base;
if (ver >= prev) {
pr_err("%s(%s, class:%d, instance:%d): mmio base for graphics ver %u is before the one for ver %u\n",
__func__,
intel_engine_class_repr(info->class),
info->class, info->instance,
prev, ver);
return -EINVAL;
}
if (ver == 0)
break;
if (!base) {
pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for graphics ver %u at entry %u\n",
__func__,
intel_engine_class_repr(info->class),
info->class, info->instance,
base, ver, j);
return -EINVAL;
}
prev = ver;
}
pr_debug("%s: min graphics version supported for %s%d is %u\n",
__func__,
intel_engine_class_repr(info->class),
info->instance,
prev);
}
return 0;
}
int intel_engine_cs_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_mmio_bases_check),
};
return i915_subtests(tests, NULL);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_engine_cs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "intel_engine_pm.h"
#include "selftests/igt_flush_test.h"
static struct i915_vma *create_wally(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *cs;
int err;
obj = i915_gem_object_create_internal(engine->i915, 4096);
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, engine->gt->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
}
err = i915_vma_sync(vma);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
}
cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_gem_object_put(obj);
return ERR_CAST(cs);
}
if (GRAPHICS_VER(engine->i915) >= 6) {
*cs++ = MI_STORE_DWORD_IMM_GEN4;
*cs++ = 0;
} else if (GRAPHICS_VER(engine->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
} else {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
}
*cs++ = i915_vma_offset(vma) + 4000;
*cs++ = STACK_MAGIC;
*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
vma->private = intel_context_create(engine); /* dummy residuals */
if (IS_ERR(vma->private)) {
vma = ERR_CAST(vma->private);
i915_gem_object_put(obj);
}
return vma;
}
static int context_sync(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -ETIME;
i915_request_put(rq);
return err;
}
static int new_context_sync(struct intel_engine_cs *engine)
{
struct intel_context *ce;
int err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
err = context_sync(ce);
intel_context_put(ce);
return err;
}
static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
{
int pass;
int err;
for (pass = 0; pass < 2; pass++) {
WRITE_ONCE(*result, 0);
err = context_sync(engine->kernel_context);
if (err || READ_ONCE(*result)) {
if (!err) {
pr_err("pass[%d] wa_bb emitted for the kernel context\n",
pass);
err = -EINVAL;
}
return err;
}
WRITE_ONCE(*result, 0);
err = new_context_sync(engine);
if (READ_ONCE(*result) != STACK_MAGIC) {
if (!err) {
pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
pass);
err = -EINVAL;
}
return err;
}
WRITE_ONCE(*result, 0);
err = new_context_sync(engine);
if (READ_ONCE(*result) != STACK_MAGIC) {
if (!err) {
pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
pass);
err = -EINVAL;
}
return err;
}
}
return 0;
}
static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
{
struct intel_context *ce;
int err, i;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
for (i = 0; i < 2; i++) {
WRITE_ONCE(*result, 0);
err = context_sync(ce);
if (err)
break;
}
intel_context_put(ce);
if (err)
return err;
if (READ_ONCE(*result)) {
pr_err("wa_bb emitted between the same user context\n");
return -EINVAL;
}
return 0;
}
static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
{
struct intel_context *ce;
int err, i;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
for (i = 0; i < 2; i++) {
WRITE_ONCE(*result, 0);
err = context_sync(ce);
if (err)
break;
err = context_sync(engine->kernel_context);
if (err)
break;
}
intel_context_put(ce);
if (err)
return err;
if (READ_ONCE(*result)) {
pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
return -EINVAL;
}
return 0;
}
static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
{
struct i915_vma *bb;
u32 *result;
int err;
bb = create_wally(engine);
if (IS_ERR(bb))
return PTR_ERR(bb);
result = i915_gem_object_pin_map_unlocked(bb->obj, I915_MAP_WC);
if (IS_ERR(result)) {
intel_context_put(bb->private);
i915_vma_unpin_and_release(&bb, 0);
return PTR_ERR(result);
}
result += 1000; /* dword 1000 == byte offset 4000, the slot create_wally() writes */
engine->wa_ctx.vma = bb;
err = mixed_contexts_sync(engine, result);
if (err)
goto out;
err = double_context_sync_00(engine, result);
if (err)
goto out;
err = kernel_context_sync_00(engine, result);
if (err)
goto out;
out:
intel_context_put(engine->wa_ctx.vma->private);
i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
return err;
}
static int live_ctx_switch_wa(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* Exercise the inter-context wa batch.
*
* Between each user context we run a wa batch, and since it may
* have implications for user visible state, we have to check that
* we do actually execute it.
*
* The trick we use is to replace the normal wa batch with a custom
* one that writes to a marker within it, and we can then look for
* that marker to confirm if the batch was run when we expect it,
* and, equally important, that it wasn't run when we don't!
*/
for_each_engine(engine, gt, id) {
struct i915_vma *saved_wa;
int err;
if (!intel_engine_can_store_dword(engine))
continue;
if (IS_GRAPHICS_VER(gt->i915, 4, 5))
continue; /* MI_STORE_DWORD is privileged! */
saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
intel_engine_pm_get(engine);
err = __live_ctx_switch_wa(engine);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
engine->wa_ctx.vma = saved_wa;
if (err)
return err;
}
return 0;
}
int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_ctx_switch_wa),
};
if (to_gt(i915)->submission_method > INTEL_SUBMISSION_RING)
return 0;
return intel_gt_live_subtests(tests, to_gt(i915));
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_ring_submission.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2020 Intel Corporation
*/
static struct intel_ring *mock_ring(unsigned long sz)
{
struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
kref_init(&ring->ref);
ring->size = sz;
ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz);
ring->effective_size = sz;
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
intel_ring_update_space(ring);
return ring;
}
static void mock_ring_free(struct intel_ring *ring)
{
kfree(ring);
}
static int check_ring_direction(struct intel_ring *ring,
u32 next, u32 prev,
int expected)
{
int result;
result = intel_ring_direction(ring, next, prev);
if (result < 0)
result = -1;
else if (result > 0)
result = 1;
if (result != expected) {
pr_err("intel_ring_direction(%u, %u):%d != %d\n",
next, prev, result, expected);
return -EINVAL;
}
return 0;
}
static int check_ring_step(struct intel_ring *ring, u32 x, u32 step)
{
u32 prev = x, next = intel_ring_wrap(ring, x + step);
int err = 0;
err |= check_ring_direction(ring, next, next, 0);
err |= check_ring_direction(ring, prev, prev, 0);
err |= check_ring_direction(ring, next, prev, 1);
err |= check_ring_direction(ring, prev, next, -1);
return err;
}
static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step)
{
int err = 0;
err |= check_ring_step(ring, x, step);
err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step);
err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step);
return err;
}
static int igt_ring_direction(void *dummy)
{
struct intel_ring *ring;
unsigned int half = 2048;
int step, err = 0;
ring = mock_ring(2 * half);
if (!ring)
return -ENOMEM;
GEM_BUG_ON(ring->size != 2 * half);
/* Precision of wrap detection is limited to ring->size / 2 */
for (step = 1; step < half; step <<= 1) {
err |= check_ring_offset(ring, 0, step);
err |= check_ring_offset(ring, half, step);
}
err |= check_ring_step(ring, 0, half - 64);
/* And check unwrapped handling for good measure */
err |= check_ring_offset(ring, 0, 2 * half + 64);
err |= check_ring_offset(ring, 3 * half, 1);
mock_ring_free(ring);
return err;
}
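/*
 * Illustrative sketch (not part of the original selftest): one way to
 * realise the wrap-aware comparison exercised above, matching how
 * mock_ring() derives ring->wrap. Shifting the unsigned difference so
 * that the ring-size MSB lands in the sign bit yields a signed
 * before/after answer, with precision limited to ring->size / 2 as
 * noted in igt_ring_direction(). The in-tree intel_ring_direction()
 * may differ in detail. Assumes size is a power of two.
 */
static int __maybe_unused ring_direction_sketch(u32 size, u32 next, u32 prev)
{
	unsigned int wrap = BITS_PER_TYPE(size) - ilog2(size);

	/* < 0: next is behind prev, 0: same, > 0: next is ahead of prev */
	return (int)((next - prev) << wrap);
}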
int intel_ring_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_ring_direction),
};
return i915_subtests(tests, NULL);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_ring.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/string_helpers.h>
#include <linux/suspend.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_params.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
#include "pxp/intel_pxp_pm.h"
#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
intel_gt_pm_put(gt);
}
static void runtime_begin(struct intel_gt *gt)
{
local_irq_disable();
write_seqcount_begin(&gt->stats.lock);
gt->stats.start = ktime_get();
gt->stats.active = true;
write_seqcount_end(&gt->stats.lock);
local_irq_enable();
}
static void runtime_end(struct intel_gt *gt)
{
local_irq_disable();
write_seqcount_begin(&gt->stats.lock);
gt->stats.active = false;
gt->stats.total =
ktime_add(gt->stats.total,
ktime_sub(ktime_get(), gt->stats.start));
write_seqcount_end(&gt->stats.lock);
local_irq_enable();
}
static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
GT_TRACE(gt, "\n");
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
* command submission.
*
* This activity has negative impact on the performance of the chip with
* huge latencies observed in the interrupt handler and elsewhere.
*
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!gt->awake);
intel_rc6_unpark(&gt->rc6);
intel_rps_unpark(&gt->rps);
i915_pmu_gt_unparked(gt);
intel_guc_busyness_unpark(gt);
intel_gt_unpark_requests(gt);
runtime_begin(gt);
return 0;
}
static int __gt_park(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
struct drm_i915_private *i915 = gt->i915;
GT_TRACE(gt, "\n");
runtime_end(gt);
intel_gt_park_requests(gt);
intel_guc_busyness_park(gt);
i915_vma_parked(gt);
i915_pmu_gt_parked(gt);
intel_rps_park(&gt->rps);
intel_rc6_park(&gt->rc6);
/* Everything switched off, flush any residual interrupt just in case */
intel_synchronize_irq(i915);
/* Defer dropping the display power well for 100ms, it's slow! */
GEM_BUG_ON(!wakeref);
intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return 0;
}
static const struct intel_wakeref_ops wf_ops = {
.get = __gt_unpark,
.put = __gt_park,
};
void intel_gt_pm_init_early(struct intel_gt *gt)
{
/*
* We access the runtime_pm structure via gt->i915 here rather than
* gt->uncore as we do elsewhere in the file because gt->uncore is not
* yet initialized for all tiles at this point in the driver startup.
* runtime_pm is per-device rather than per-tile, so this is still the
* correct structure.
*/
intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
void intel_gt_pm_init(struct intel_gt *gt)
{
/*
* Enabling power-management should be "self-healing". If we cannot
* enable a feature, simply leave it disabled with a notice to the
* user.
*/
intel_rc6_init(&gt->rc6);
intel_rps_init(&gt->rps);
}
static bool reset_engines(struct intel_gt *gt)
{
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
return false;
return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}
static void gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
GT_TRACE(gt, "force:%s", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_gt_check_clock_frequency(gt);
/*
* As we have just resumed the machine and woken the device up from
* deep PCI sleep (presumably D3_cold), assume the HW has been reset
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
if (intel_gt_is_wedged(gt))
intel_gt_unset_wedged(gt);
/* For GuC mode, ensure submission is disabled before stopping ring */
intel_uc_reset_prepare(&gt->uc);
for_each_engine(engine, gt, id) {
if (engine->reset.prepare)
engine->reset.prepare(engine);
if (engine->sanitize)
engine->sanitize(engine);
}
if (reset_engines(gt) || force) {
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, false);
}
intel_uc_reset(&gt->uc, false);
for_each_engine(engine, gt, id)
if (engine->reset.finish)
engine->reset.finish(engine);
intel_rps_sanitize(&gt->rps);
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
void intel_gt_pm_fini(struct intel_gt *gt)
{
intel_rc6_fini(&gt->rc6);
}
int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
err = intel_gt_has_unrecoverable_error(gt);
if (err)
return err;
GT_TRACE(gt, "\n");
/*
* After resume, we may need to poke into the pinned kernel
* contexts to paper over any damage caused by the sudden suspend.
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
gt_sanitize(gt, true);
intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
if (intel_gt_is_wedged(gt)) {
err = -EIO;
goto out_fw;
}
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
gt_probe_error(gt, "Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
intel_uc_reset_finish(&gt->uc);
intel_rps_enable(&gt->rps);
intel_llc_enable(&gt->llc);
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
err = intel_engine_resume(engine);
intel_engine_pm_put(engine);
if (err) {
gt_err(gt, "Failed to restart %s (%d)\n",
engine->name, err);
goto err_wedged;
}
}
intel_rc6_enable(&gt->rc6);
intel_uc_resume(&gt->uc);
user_forcewake(gt, false);
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
return err;
err_wedged:
intel_gt_set_wedged(gt);
goto out_fw;
}
static void wait_for_suspend(struct intel_gt *gt)
{
if (!intel_gt_pm_is_awake(gt))
return;
if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) {
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
intel_gt_set_wedged(gt);
intel_gt_retire_requests(gt);
}
intel_gt_pm_wait_for_idle(gt);
}
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
user_forcewake(gt, true);
wait_for_suspend(gt);
}
static suspend_state_t pm_suspend_target(void)
{
#if IS_ENABLED(CONFIG_SUSPEND) && IS_ENABLED(CONFIG_PM_SLEEP)
return pm_suspend_target_state;
#else
return PM_SUSPEND_TO_IDLE;
#endif
}
void intel_gt_suspend_late(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
/* We expect to be idle already; but also want to be independent */
wait_for_suspend(gt);
if (is_mock_gt(gt))
return;
GEM_BUG_ON(gt->awake);
intel_uc_suspend(&gt->uc);
/*
* On disabling the device, we want to turn off HW access to memory
* that we no longer own.
*
* However, not all suspend-states disable the device. S0 (s2idle)
* is effectively runtime-suspend, the device is left powered on
* but needs to be put into a low power state. We need to keep
* power management enabled, but we also retain system state and so
* it remains safe to keep on using our allocated memory.
*/
if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
return;
with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
intel_rps_disable(&gt->rps);
intel_rc6_disable(&gt->rc6);
intel_llc_disable(&gt->llc);
}
gt_sanitize(gt, false);
GT_TRACE(gt, "\n");
}
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
}
int intel_gt_runtime_resume(struct intel_gt *gt)
{
int ret;
GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
intel_ggtt_restore_fences(gt->ggtt);
ret = intel_uc_runtime_resume(&gt->uc);
if (ret)
return ret;
return 0;
}
static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
{
ktime_t total = gt->stats.total;
if (gt->stats.active)
total = ktime_add(total,
ktime_sub(ktime_get(), gt->stats.start));
return total;
}
ktime_t intel_gt_get_awake_time(const struct intel_gt *gt)
{
unsigned int seq;
ktime_t total;
do {
seq = read_seqcount_begin(&gt->stats.lock);
total = __intel_gt_get_awake_time(gt);
} while (read_seqcount_retry(&gt->stats.lock, seq));
return total;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_gt_pm.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_pm.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "intel_migrate.h"
#include "intel_ring.h"
#include "gem/i915_gem_lmem.h"
struct insert_pte_data {
u64 offset;
};
#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
#define GET_CCS_BYTES(i915, size) (HAS_FLAT_CCS(i915) ? \
DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
static bool engine_supports_migration(struct intel_engine_cs *engine)
{
if (!engine)
return false;
/*
* We need the ability to prevent arbitration (MI_ARB_ON_OFF),
* the ability to write PTE using inline data (MI_STORE_DATA)
* and of course the ability to do the block transfer (blits).
*/
GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);
return true;
}
static void xehpsdv_toggle_pdes(struct i915_address_space *vm,
struct i915_page_table *pt,
void *data)
{
struct insert_pte_data *d = data;
/*
* Insert a dummy PTE into every PT that will map to LMEM to ensure
* we have a correctly setup PDE structure for later use.
*/
vm->insert_page(vm, 0, d->offset,
i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
PTE_LM);
GEM_BUG_ON(!pt->is_compact);
d->offset += SZ_2M;
}
static void xehpsdv_insert_pte(struct i915_address_space *vm,
struct i915_page_table *pt,
void *data)
{
struct insert_pte_data *d = data;
/*
* We are playing tricks here, since the actual pt, from the hw
* pov, is only 256 bytes with 32 entries, or 4096 bytes with 512
* entries, but we are still guaranteed that the physical
* alignment is 64K underneath for the pt, and we are careful
* not to access the space in the void.
*/
vm->insert_page(vm, px_dma(pt), d->offset,
i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
PTE_LM);
d->offset += SZ_64K;
}
static void insert_pte(struct i915_address_space *vm,
struct i915_page_table *pt,
void *data)
{
struct insert_pte_data *d = data;
vm->insert_page(vm, px_dma(pt), d->offset,
i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
d->offset += PAGE_SIZE;
}
static struct i915_address_space *migrate_vm(struct intel_gt *gt)
{
struct i915_vm_pt_stash stash = {};
struct i915_ppgtt *vm;
int err;
int i;
/*
* We construct a very special VM for use by all migration contexts,
* it is kept pinned so that it can be used at any time. As we need
* to pre-allocate the page directories for the migration VM, this
* limits us to only using a small number of prepared vma.
*
* To be able to pipeline and reschedule migration operations while
* avoiding unnecessary contention on the vm itself, the PTE updates
* are inline with the blits. All the blits use the same fixed
* addresses, with the backing store redirection being updated on the
* fly. Only 2 implicit vma are used for all migration operations.
*
* We lay the ppGTT out as:
*
* [0, CHUNK_SZ) -> first object
* [CHUNK_SZ, 2 * CHUNK_SZ) -> second object
* [2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE
*
* By exposing the dma addresses of the page directories themselves
* within the ppGTT, we are then able to rewrite the PTE prior to use.
* But the PTE update and subsequent migration operation must be atomic,
* i.e. within the same non-preemptible window so that we do not switch
* to another migration context that overwrites the PTE.
*
* This changes quite a bit on platforms with HAS_64K_PAGES support,
* where we instead have three windows, each CHUNK_SIZE in size. The
* first is reserved for mapping system-memory, and that just uses the
* 512 entry layout using 4K GTT pages. The other two windows just map
* lmem pages and must use the new compact 32 entry layout using 64K GTT
* pages, which ensures we can address any lmem object that the user
* throws at us. We then also use the xehpsdv_toggle_pdes as a way of
* just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the
* compact layout for each of these page-tables, that fall within the
* [CHUNK_SIZE, 3 * CHUNK_SIZE) range.
*
* We lay the ppGTT out as:
*
* [0, CHUNK_SZ) -> first window/object, maps smem
* [CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src
* [2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst
*
* For the PTE window it's also quite different, since each PTE must
* point to some 64K page, one for each PT(since it's in lmem), and yet
* each is only <= 4096 bytes, but since the unused space within that PTE
* range is never touched, this should be fine.
*
* So basically each PT now needs 64K of virtual memory, instead of 4K,
* which looks like:
*
* [3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)] -> PTE
*/
vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
if (IS_ERR(vm))
return ERR_CAST(vm);
if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
err = -ENODEV;
goto err_vm;
}
if (HAS_64K_PAGES(gt->i915))
stash.pt_sz = I915_GTT_PAGE_SIZE_64K;
/*
* Each engine instance is assigned its own chunk in the VM, so
* that we can run multiple instances concurrently
*/
for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
struct intel_engine_cs *engine;
u64 base = (u64)i << 32;
struct insert_pte_data d = {};
struct i915_gem_ww_ctx ww;
u64 sz;
engine = gt->engine_class[COPY_ENGINE_CLASS][i];
if (!engine_supports_migration(engine))
continue;
/*
* We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
* 4x2 page directories for source/destination.
*/
if (HAS_64K_PAGES(gt->i915))
sz = 3 * CHUNK_SZ;
else
sz = 2 * CHUNK_SZ;
d.offset = base + sz;
/*
* We need another page directory setup so that we can write
* the 8x512 PTE in each chunk.
*/
if (HAS_64K_PAGES(gt->i915))
sz += (sz / SZ_2M) * SZ_64K;
else
sz += (sz >> 12) * sizeof(u64);
err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
if (err)
goto err_vm;
for_i915_gem_ww(&ww, err, true) {
err = i915_vm_lock_objects(&vm->vm, &ww);
if (err)
continue;
err = i915_vm_map_pt_stash(&vm->vm, &stash);
if (err)
continue;
vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
}
i915_vm_free_pt_stash(&vm->vm, &stash);
if (err)
goto err_vm;
/* Now allow the GPU to rewrite the PTE via its own ppGTT */
if (HAS_64K_PAGES(gt->i915)) {
vm->vm.foreach(&vm->vm, base, d.offset - base,
xehpsdv_insert_pte, &d);
d.offset = base + CHUNK_SZ;
vm->vm.foreach(&vm->vm,
d.offset,
2 * CHUNK_SZ,
xehpsdv_toggle_pdes, &d);
} else {
vm->vm.foreach(&vm->vm, base, d.offset - base,
insert_pte, &d);
}
}
return &vm->vm;
err_vm:
i915_vm_put(&vm->vm);
return ERR_PTR(err);
}
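/*
 * Illustrative sketch (not part of the original file): the per-engine
 * virtual address footprint reserved by migrate_vm() above, recomputed
 * in one place. Two CHUNK_SZ windows (three with 64K pages) are
 * followed by a window exposing the page tables themselves, so that
 * the copy engine can rewrite PTEs inline with the blits.
 */
static u64 __maybe_unused migrate_window_size_sketch(bool has_64k_pages)
{
	u64 sz = has_64k_pages ? 3 * CHUNK_SZ : 2 * CHUNK_SZ;

	if (has_64k_pages)
		sz += (sz / SZ_2M) * SZ_64K;	/* one 64K PTE window per PT */
	else
		sz += (sz >> 12) * sizeof(u64);	/* one qword PTE per 4K page */

	return sz;
}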
static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
int i;
for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
engine = gt->engine_class[COPY_ENGINE_CLASS][i];
if (engine_supports_migration(engine))
return engine;
}
return NULL;
}
static struct intel_context *pinned_context(struct intel_gt *gt)
{
static struct lock_class_key key;
struct intel_engine_cs *engine;
struct i915_address_space *vm;
struct intel_context *ce;
engine = first_copy_engine(gt);
if (!engine)
return ERR_PTR(-ENODEV);
vm = migrate_vm(gt);
if (IS_ERR(vm))
return ERR_CAST(vm);
ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
I915_GEM_HWS_MIGRATE,
&key, "migrate");
i915_vm_put(vm);
return ce;
}
int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
{
struct intel_context *ce;
memset(m, 0, sizeof(*m));
ce = pinned_context(gt);
if (IS_ERR(ce))
return PTR_ERR(ce);
m->context = ce;
return 0;
}
static int random_index(unsigned int max)
{
/* Scale a uniform 32-bit random value onto [0, max) */
return upper_32_bits(mul_u32_u32(get_random_u32(), max));
}
static struct intel_context *__migrate_engines(struct intel_gt *gt)
{
struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
struct intel_engine_cs *engine;
unsigned int count, i;
count = 0;
for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
engine = gt->engine_class[COPY_ENGINE_CLASS][i];
if (engine_supports_migration(engine))
engines[count++] = engine;
}
return intel_context_create(engines[random_index(count)]);
}
struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
{
struct intel_context *ce;
/*
* We randomly distribute contexts across the engines upon construction,
* as they all share the same pinned vm, and so in order to allow
* multiple blits to run in parallel, we must construct each blit
* to use a different range of the vm for its GTT. This has to be
* known at construction, so we can not use the late greedy load
* balancing of the virtual-engine.
*/
ce = __migrate_engines(m->context->engine->gt);
if (IS_ERR(ce))
return ce;
ce->ring = NULL;
ce->ring_size = SZ_256K;
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(m->context->vm);
return ce;
}
static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
{
dma_addr_t addr = sg_dma_address(sg);
return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
static int emit_no_arbitration(struct i915_request *rq)
{
u32 *cs;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Explicitly disable preemption for this request. */
*cs++ = MI_ARB_ON_OFF;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int max_pte_pkt_size(struct i915_request *rq, int pkt)
{
struct intel_ring *ring = rq->ring;
pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
return pkt;
}
#define I915_EMIT_PTE_NUM_DWORDS 6
static int emit_pte(struct i915_request *rq,
struct sgt_dma *it,
unsigned int pat_index,
bool is_lmem,
u64 offset,
int length)
{
bool has_64K_pages = HAS_64K_PAGES(rq->i915);
const u64 encode = rq->context->vm->pte_encode(0, pat_index,
is_lmem ? PTE_LM : 0);
struct intel_ring *ring = rq->ring;
int pkt, dword_length;
u32 total = 0;
u32 page_size;
u32 *hdr, *cs;
GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8);
page_size = I915_GTT_PAGE_SIZE;
dword_length = 0x400;
/* Compute the page directory offset for the target address range */
if (has_64K_pages) {
GEM_BUG_ON(!IS_ALIGNED(offset, SZ_2M));
offset /= SZ_2M;
offset *= SZ_64K;
offset += 3 * CHUNK_SZ;
if (is_lmem) {
page_size = I915_GTT_PAGE_SIZE_64K;
dword_length = 0x40;
}
} else {
offset >>= 12;
offset *= sizeof(u64);
offset += 2 * CHUNK_SZ;
}
offset += (u64)rq->engine->instance << 32;
cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Pack as many PTE updates as possible into a single MI command */
pkt = max_pte_pkt_size(rq, dword_length);
hdr = cs;
*cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
do {
if (cs - hdr >= pkt) {
int dword_rem;
*hdr += cs - hdr - 2;
*cs++ = MI_NOOP;
ring->emit = (void *)cs - ring->vaddr;
intel_ring_advance(rq, cs);
intel_ring_update_space(ring);
cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
if (IS_ERR(cs))
return PTR_ERR(cs);
dword_rem = dword_length;
if (has_64K_pages) {
if (IS_ALIGNED(total, SZ_2M)) {
offset = round_up(offset, SZ_64K);
} else {
dword_rem = SZ_2M - (total & (SZ_2M - 1));
dword_rem /= page_size;
dword_rem *= 2;
}
}
pkt = max_pte_pkt_size(rq, dword_rem);
hdr = cs;
*cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
}
GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size));
*cs++ = lower_32_bits(encode | it->dma);
*cs++ = upper_32_bits(encode | it->dma);
offset += 8;
total += page_size;
it->dma += page_size;
if (it->dma >= it->max) {
it->sg = __sg_next(it->sg);
if (!it->sg || sg_dma_len(it->sg) == 0)
break;
it->dma = sg_dma_address(it->sg);
it->max = it->dma + sg_dma_len(it->sg);
}
} while (total < length);
*hdr += cs - hdr - 2;
*cs++ = MI_NOOP;
ring->emit = (void *)cs - ring->vaddr;
intel_ring_advance(rq, cs);
intel_ring_update_space(ring);
return total;
}
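/*
 * Illustrative sketch (not part of the original file): where emit_pte()
 * points MI_STORE_DATA_IMM for a given GTT offset on the !HAS_64K_PAGES
 * layout. Each 4K page in the two CHUNK_SZ windows is controlled by one
 * qword PTE, laid out linearly from 2 * CHUNK_SZ (see migrate_vm()),
 * and each engine instance owns its own 4G slice of the migrate vm.
 */
static u64 __maybe_unused pte_slot_for_offset_sketch(u64 gtt_offset,
						     u32 engine_instance)
{
	u64 slot = (gtt_offset >> 12) * sizeof(u64) + 2 * CHUNK_SZ;

	return slot + ((u64)engine_instance << 32);
}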
static bool wa_1209644611_applies(int ver, u32 size)
{
u32 height = size >> PAGE_SHIFT;
if (ver != 11)
return false;
return height % 4 == 3 && height <= 8;
}
/**
* DOC: Flat-CCS - Memory compression for Local memory
*
* On Xe-HP and later devices, we use dedicated compression control state (CCS)
* stored in local memory for each surface, to support the 3D and media
* compression formats.
*
* The memory required for the CCS of the entire local memory is 1/256 of the
* local memory size. So before the kernel boot, the required memory is reserved
* for the CCS data and a secure register will be programmed with the CCS base
* address.
*
* Flat CCS data needs to be cleared when a lmem object is allocated.
* And CCS data can be copied in and out of CCS region through
* XY_CTRL_SURF_COPY_BLT. CPU can't access the CCS data directly.
*
* I915 supports Flat-CCS on lmem-only objects. When an object has smem in
* its preference list, on memory pressure, i915 needs to migrate the lmem
* content into smem. If the lmem object is Flat-CCS compressed by userspace,
* then i915 needs to decompress it. But i915 lacks the required information
* for such decompression. Hence i915 supports Flat-CCS only on lmem-only objects.
*
* When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can
* be temporarily evicted to smem, along with the auxiliary CCS state, where
* it can be potentially swapped-out at a later point, if required.
* If userspace later touches the evicted pages, then we always move
* the backing memory back to lmem, which includes restoring the saved CCS state,
* and potentially performing any required swap-in.
*
* For the migration of the lmem objects with smem in placement list, such as
* {lmem, smem}, objects are treated as non Flat-CCS capable objects.
*/
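/*
 * Illustrative sketch (not part of the original file): rough CCS sizing
 * implied by the documentation above. CCS occupies 1/256th of the local
 * memory it covers, and one XY_CTRL_SURF_COPY_BLT moves at most 1024
 * blocks of 256 bytes of CCS, i.e. 256K of CCS covering 64M of LMEM
 * (see the comment in emit_copy_ccs() below). The helper names here are
 * invented for the sketch.
 */
static u64 __maybe_unused ccs_bytes_for_lmem_sketch(u64 lmem_bytes)
{
	return DIV_ROUND_UP_ULL(lmem_bytes, 256);
}

static u64 __maybe_unused ctrl_surf_blts_needed_sketch(u64 lmem_bytes)
{
	/* Each instruction covers the CCS for up to SZ_64M of LMEM. */
	return DIV_ROUND_UP_ULL(lmem_bytes, SZ_64M);
}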
static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
{
*cmd++ = MI_FLUSH_DW | flags;
*cmd++ = 0;
*cmd++ = 0;
return cmd;
}
static int emit_copy_ccs(struct i915_request *rq,
u32 dst_offset, u8 dst_access,
u32 src_offset, u8 src_access, int size)
{
struct drm_i915_private *i915 = rq->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
u32 num_ccs_blks;
u32 *cs;
cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
NUM_CCS_BYTES_PER_BLOCK);
GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
/*
* The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
* data in and out of the CCS region.
*
* We can copy at most 1024 blocks of 256 bytes using one
* XY_CTRL_SURF_COPY_BLT instruction.
*
* In case we need to copy more than 1024 blocks, we need to add
* another instruction to the same batch buffer.
*
* 1024 blocks of 256 bytes of CCS represent a total 256KB of CCS.
*
* 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
*/
*cs++ = XY_CTRL_SURF_COPY_BLT |
src_access << SRC_ACCESS_TYPE_SHIFT |
dst_access << DST_ACCESS_TYPE_SHIFT |
((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
*cs++ = src_offset;
*cs++ = rq->engine->instance |
FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
*cs++ = dst_offset;
*cs++ = rq->engine->instance |
FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int emit_copy(struct i915_request *rq,
u32 dst_offset, u32 src_offset, int size)
{
const int ver = GRAPHICS_VER(rq->i915);
u32 instance = rq->engine->instance;
u32 *cs;
cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
*cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cs++ = BLT_DEPTH_32 | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cs++ = dst_offset;
*cs++ = instance;
*cs++ = 0;
*cs++ = PAGE_SIZE;
*cs++ = src_offset;
*cs++ = instance;
} else if (ver >= 8) {
*cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cs++ = dst_offset;
*cs++ = instance;
*cs++ = 0;
*cs++ = PAGE_SIZE;
*cs++ = src_offset;
*cs++ = instance;
} else {
GEM_BUG_ON(instance);
*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
*cs++ = dst_offset;
*cs++ = PAGE_SIZE;
*cs++ = src_offset;
}
intel_ring_advance(rq, cs);
return 0;
}
static u64 scatter_list_length(struct scatterlist *sg)
{
u64 len = 0;
while (sg && sg_dma_len(sg)) {
len += sg_dma_len(sg);
sg = sg_next(sg);
}
return len;
}
static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
if (ccs_bytes_to_cpy && !src_is_lmem)
/*
* When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
* will be taken for the blit. On Flat-CCS capable
* platforms the smem object will have more pages than
* required for main memory alone, so limit the chunk to
* the size required for main memory.
*/
return min_t(u64, bytes_to_cpy, CHUNK_SZ);
else
return CHUNK_SZ;
}
static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
u64 len;
do {
GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
len = it->max - it->dma;
if (len > bytes_to_cpy) {
it->dma += bytes_to_cpy;
break;
}
bytes_to_cpy -= len;
it->sg = __sg_next(it->sg);
it->dma = sg_dma_address(it->sg);
it->max = it->dma + sg_dma_len(it->sg);
} while (bytes_to_cpy);
}
int
intel_context_migrate_copy(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *src,
unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
unsigned int dst_pat_index,
bool dst_is_lmem,
struct i915_request **out)
{
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915;
u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
unsigned int ccs_pat_index;
u32 src_offset, dst_offset;
u8 src_access, dst_access;
struct i915_request *rq;
u64 src_sz, dst_sz;
bool ccs_is_src, overwrite_ccs;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
*out = NULL;
GEM_BUG_ON(ce->ring->size < SZ_64K);
src_sz = scatter_list_length(src);
bytes_to_cpy = src_sz;
if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
src_access = !src_is_lmem && dst_is_lmem;
dst_access = !src_access;
dst_sz = scatter_list_length(dst);
if (src_is_lmem) {
it_ccs = it_dst;
ccs_pat_index = dst_pat_index;
ccs_is_src = false;
} else if (dst_is_lmem) {
bytes_to_cpy = dst_sz;
it_ccs = it_src;
ccs_pat_index = src_pat_index;
ccs_is_src = true;
}
/*
* When an eviction of the CCS data is needed, the smem backing
* store carries the extra pages for the CCS data.
*
* TO-DO: Want to move the size mismatch check to a WARN_ON,
* but we still see some smem->lmem requests with the same size.
* Needs fixing.
*/
ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
if (ccs_bytes_to_cpy)
get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
}
overwrite_ccs = HAS_FLAT_CCS(i915) && !ccs_bytes_to_cpy && dst_is_lmem;
src_offset = 0;
dst_offset = CHUNK_SZ;
if (HAS_64K_PAGES(ce->engine->i915)) {
src_offset = 0;
dst_offset = 0;
if (src_is_lmem)
src_offset = CHUNK_SZ;
if (dst_is_lmem)
dst_offset = 2 * CHUNK_SZ;
}
do {
int len;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ce;
}
if (deps) {
err = i915_request_await_deps(rq, deps);
if (err)
goto out_rq;
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (err)
goto out_rq;
}
deps = NULL;
}
/* The PTE updates + copy must not be interrupted. */
err = emit_no_arbitration(rq);
if (err)
goto out_rq;
src_sz = calculate_chunk_sz(i915, src_is_lmem,
bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_pat_index, src_is_lmem,
src_offset, src_sz);
if (!len) {
err = -EINVAL;
goto out_rq;
}
if (len < 0) {
err = len;
goto out_rq;
}
err = emit_pte(rq, &it_dst, dst_pat_index, dst_is_lmem,
dst_offset, len);
if (err < 0)
goto out_rq;
if (err < len) {
err = -EINVAL;
goto out_rq;
}
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
err = emit_copy(rq, dst_offset, src_offset, len);
if (err)
goto out_rq;
bytes_to_cpy -= len;
if (ccs_bytes_to_cpy) {
int ccs_sz;
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
ccs_sz = GET_CCS_BYTES(i915, len);
err = emit_pte(rq, &it_ccs, ccs_pat_index, false,
ccs_is_src ? src_offset : dst_offset,
ccs_sz);
if (err < 0)
goto out_rq;
if (err < ccs_sz) {
err = -EINVAL;
goto out_rq;
}
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
err = emit_copy_ccs(rq, dst_offset, dst_access,
src_offset, src_access, len);
if (err)
goto out_rq;
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
ccs_bytes_to_cpy -= ccs_sz;
} else if (overwrite_ccs) {
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
if (src_is_lmem) {
/*
* If the src is already in lmem, then we must
* be doing an lmem -> lmem transfer, and so
* should be safe to directly copy the CCS
* state. In this case we have either
* initialised the CCS aux state when first
* clearing the pages (since it is already
* allocated in lmem), or the user has
* potentially populated it, in which case we
* need to copy the CCS state as-is.
*/
err = emit_copy_ccs(rq,
dst_offset, INDIRECT_ACCESS,
src_offset, INDIRECT_ACCESS,
len);
} else {
/*
* While we can't always restore/manage the CCS
* state, we still need to ensure we don't leak
* the CCS state from the previous user, so make
* sure we overwrite it with something.
*/
err = emit_copy_ccs(rq,
dst_offset, INDIRECT_ACCESS,
dst_offset, DIRECT_ACCESS,
len);
}
if (err)
goto out_rq;
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
}
/* Arbitration is re-enabled between requests. */
out_rq:
if (*out)
i915_request_put(*out);
*out = i915_request_get(rq);
i915_request_add(rq);
if (err)
break;
if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
if (src_is_lmem)
WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
else
WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
break;
}
if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
!it_dst.sg || !sg_dma_len(it_dst.sg) ||
(ccs_bytes_to_cpy && (!it_ccs.sg ||
!sg_dma_len(it_ccs.sg))))) {
err = -EINVAL;
break;
}
cond_resched();
} while (1);
out_ce:
return err;
}
static int emit_clear(struct i915_request *rq, u32 offset, int size,
u32 value, bool is_lmem)
{
struct drm_i915_private *i915 = rq->i915;
int mocs = rq->engine->gt->mocs.uc_index << 1;
const int ver = GRAPHICS_VER(i915);
int ring_sz;
u32 *cs;
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
ring_sz = XY_FAST_COLOR_BLT_DW;
else if (ver >= 8)
ring_sz = 8;
else
ring_sz = 6;
cs = intel_ring_begin(rq, ring_sz);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
(XY_FAST_COLOR_BLT_DW - 2);
*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
(PAGE_SIZE - 1);
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cs++ = offset;
*cs++ = rq->engine->instance;
*cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
/* BG7 */
*cs++ = value;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
/* BG11 */
*cs++ = 0;
*cs++ = 0;
/* BG13 */
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
} else if (ver >= 8) {
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cs++ = offset;
*cs++ = rq->engine->instance;
*cs++ = value;
*cs++ = MI_NOOP;
} else {
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
*cs++ = offset;
*cs++ = value;
}
intel_ring_advance(rq, cs);
return 0;
}
int
intel_context_migrate_clear(struct intel_context *ce,
const struct i915_deps *deps,
struct scatterlist *sg,
unsigned int pat_index,
bool is_lmem,
u32 value,
struct i915_request **out)
{
struct drm_i915_private *i915 = ce->engine->i915;
struct sgt_dma it = sg_sgt(sg);
struct i915_request *rq;
u32 offset;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
*out = NULL;
GEM_BUG_ON(ce->ring->size < SZ_64K);
offset = 0;
if (HAS_64K_PAGES(i915) && is_lmem)
offset = CHUNK_SZ;
do {
int len;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_ce;
}
if (deps) {
err = i915_request_await_deps(rq, deps);
if (err)
goto out_rq;
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (err)
goto out_rq;
}
deps = NULL;
}
/* The PTE updates + clear must not be interrupted. */
err = emit_no_arbitration(rq);
if (err)
goto out_rq;
len = emit_pte(rq, &it, pat_index, is_lmem, offset, CHUNK_SZ);
if (len <= 0) {
err = len;
goto out_rq;
}
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
goto out_rq;
err = emit_clear(rq, offset, len, value, is_lmem);
if (err)
goto out_rq;
if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
/*
* copy the content of memory into corresponding
* ccs surface
*/
err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
DIRECT_ACCESS, len);
if (err)
goto out_rq;
}
err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
/* Arbitration is re-enabled between requests. */
out_rq:
if (*out)
i915_request_put(*out);
*out = i915_request_get(rq);
i915_request_add(rq);
if (err || !it.sg || !sg_dma_len(it.sg))
break;
cond_resched();
} while (1);
out_ce:
return err;
}
int intel_migrate_copy(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *src,
unsigned int src_pat_index,
bool src_is_lmem,
struct scatterlist *dst,
unsigned int dst_pat_index,
bool dst_is_lmem,
struct i915_request **out)
{
struct intel_context *ce;
int err;
*out = NULL;
if (!m->context)
return -ENODEV;
ce = intel_migrate_create_context(m);
if (IS_ERR(ce))
ce = intel_context_get(m->context);
GEM_BUG_ON(IS_ERR(ce));
err = intel_context_pin_ww(ce, ww);
if (err)
goto out;
err = intel_context_migrate_copy(ce, deps,
src, src_pat_index, src_is_lmem,
dst, dst_pat_index, dst_is_lmem,
out);
intel_context_unpin(ce);
out:
intel_context_put(ce);
return err;
}
int
intel_migrate_clear(struct intel_migrate *m,
struct i915_gem_ww_ctx *ww,
const struct i915_deps *deps,
struct scatterlist *sg,
unsigned int pat_index,
bool is_lmem,
u32 value,
struct i915_request **out)
{
struct intel_context *ce;
int err;
*out = NULL;
if (!m->context)
return -ENODEV;
ce = intel_migrate_create_context(m);
if (IS_ERR(ce))
ce = intel_context_get(m->context);
GEM_BUG_ON(IS_ERR(ce));
err = intel_context_pin_ww(ce, ww);
if (err)
goto out;
err = intel_context_migrate_clear(ce, deps, sg, pat_index,
is_lmem, value, out);
intel_context_unpin(ce);
out:
intel_context_put(ce);
return err;
}
void intel_migrate_fini(struct intel_migrate *m)
{
struct intel_context *ce;
ce = fetch_and_zero(&m->context);
if (!ce)
return;
intel_engine_destroy_pinned_context(ce);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_migrate.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_migrate.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <drm/drm_managed.h>
#include <drm/intel-gtt.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_pci_config.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_tlb.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
void intel_gt_common_init_early(struct intel_gt *gt)
{
spin_lock_init(gt->irq_lock);
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
init_llist_head(&gt->watchdog.list);
INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);
intel_gt_init_buffer_pool(gt);
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
intel_gt_init_tlb(gt);
intel_gt_pm_init_early(gt);
intel_wopcm_init_early(&gt->wopcm);
intel_uc_init_early(&gt->uc);
intel_rps_init_early(&gt->rps);
}
/* Preliminary initialization of Tile 0 */
int intel_root_gt_init_early(struct drm_i915_private *i915)
{
struct intel_gt *gt = to_gt(i915);
gt->i915 = i915;
gt->uncore = &i915->uncore;
gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
if (!gt->irq_lock)
return -ENOMEM;
intel_gt_common_init_early(gt);
return 0;
}
static int intel_gt_probe_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
unsigned int instance = gt->info.id;
int id = INTEL_REGION_LMEM_0 + instance;
struct intel_memory_region *mem;
int err;
mem = intel_gt_setup_lmem(gt);
if (IS_ERR(mem)) {
err = PTR_ERR(mem);
if (err == -ENODEV)
return 0;
gt_err(gt, "Failed to setup region(%d) type=%d\n",
err, INTEL_MEMORY_LOCAL);
return err;
}
mem->id = id;
mem->instance = instance;
intel_memory_region_set_name(mem, "local%u", mem->instance);
GEM_BUG_ON(!HAS_REGION(i915, id));
GEM_BUG_ON(i915->mm.regions[id]);
i915->mm.regions[id] = mem;
return 0;
}
int intel_gt_assign_ggtt(struct intel_gt *gt)
{
/* Media GT shares primary GT's GGTT */
if (gt->type == GT_MEDIA) {
gt->ggtt = to_gt(gt->i915)->ggtt;
} else {
gt->ggtt = i915_ggtt_create(gt->i915);
if (IS_ERR(gt->ggtt))
return PTR_ERR(gt->ggtt);
}
list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);
return 0;
}
int intel_gt_init_mmio(struct intel_gt *gt)
{
intel_gt_init_clock_frequency(gt);
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
intel_gt_mcr_init(gt);
return intel_engines_init_mmio(gt);
}
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
struct intel_uncore *uncore = gt->uncore;
intel_uncore_write(uncore, RING_CTL(base), 0);
intel_uncore_write(uncore, RING_HEAD(base), 0);
intel_uncore_write(uncore, RING_TAIL(base), 0);
intel_uncore_write(uncore, RING_START(base), 0);
}
static void init_unused_rings(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
if (IS_I830(i915)) {
init_unused_ring(gt, PRB1_BASE);
init_unused_ring(gt, SRB0_BASE);
init_unused_ring(gt, SRB1_BASE);
init_unused_ring(gt, SRB2_BASE);
init_unused_ring(gt, SRB3_BASE);
} else if (GRAPHICS_VER(i915) == 2) {
init_unused_ring(gt, SRB0_BASE);
init_unused_ring(gt, SRB1_BASE);
} else if (GRAPHICS_VER(i915) == 3) {
init_unused_ring(gt, PRB1_BASE);
init_unused_ring(gt, PRB2_BASE);
}
}
int intel_gt_init_hw(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
int ret;
gt->last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
if (IS_HASWELL(i915))
intel_uncore_write(uncore,
HSW_MI_PREDICATE_RESULT_2,
IS_HASWELL_GT3(i915) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
intel_gt_apply_workarounds(gt);
/* ...and determine whether they are sticking. */
intel_gt_verify_workarounds(gt, "init");
intel_gt_init_swizzling(gt);
/*
* At least 830 can leave some of the unused rings
* "active" (i.e. head != tail) after resume, which
* will prevent C3 entry. Make sure all unused rings
* are totally idle.
*/
init_unused_rings(gt);
ret = i915_ppgtt_init_hw(gt);
if (ret) {
gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(&gt->uc);
if (ret) {
gt_probe_error(gt, "Enabling uc failed (%d)\n", ret);
goto out;
}
intel_mocs_init(gt);
out:
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
GEN6_RING_FAULT_REG_POSTING_READ(engine);
}
i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
{
/* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
if (GRAPHICS_VER(gt->i915) < 11)
return INVALID_MMIO_REG;
return gt->type == GT_MEDIA ?
MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS;
}
void
intel_gt_clear_error_registers(struct intel_gt *gt,
intel_engine_mask_t engine_mask)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
u32 eir;
if (GRAPHICS_VER(i915) != 2)
intel_uncore_write(uncore, PGTBL_ER, 0);
if (GRAPHICS_VER(i915) < 4)
intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
else
intel_uncore_write(uncore, IPEIR_I965, 0);
intel_uncore_write(uncore, EIR, 0);
eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
intel_uncore_rmw(uncore, EMR, 0, eir);
intel_uncore_write(uncore, GEN2_IIR,
I915_MASTER_ERROR_INTERRUPT);
}
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 12) {
intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 8) {
intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 6) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine_masked(engine, gt, engine_mask, id)
gen6_clear_engine_error_register(engine);
}
}
static void gen6_check_faults(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
u32 fault;
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
gt_dbg(gt, "Unexpected fault\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
fault & PAGE_MASK,
fault & RING_FAULT_GTTSEL_MASK ?
"GGTT" : "PPGTT",
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
}
}
}
static void xehp_check_faults(struct intel_gt *gt)
{
u32 fault;
/*
* Although the fault register now lives in an MCR register range,
* the GAM registers are special and we only truly need to read
* the "primary" GAM instance rather than handling each instance
* individually. intel_gt_mcr_read_any() will automatically steer
* toward the primary instance.
*/
fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
if (fault & RING_FAULT_VALID) {
u32 fault_data0, fault_data1;
u64 fault_addr;
fault_data0 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0);
fault_data1 = intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1);
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
gt_dbg(gt, "Unexpected fault\n"
"\tAddr: 0x%08x_%08x\n"
"\tAddress space: %s\n"
"\tEngine ID: %d\n"
"\tSource ID: %d\n"
"\tType: %d\n",
upper_32_bits(fault_addr), lower_32_bits(fault_addr),
fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
GEN8_RING_FAULT_ENGINE_ID(fault),
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
}
}
static void gen8_check_faults(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
u32 fault;
if (GRAPHICS_VER(gt->i915) >= 12) {
fault_reg = GEN12_RING_FAULT_REG;
fault_data0_reg = GEN12_FAULT_TLB_DATA0;
fault_data1_reg = GEN12_FAULT_TLB_DATA1;
} else {
fault_reg = GEN8_RING_FAULT_REG;
fault_data0_reg = GEN8_FAULT_TLB_DATA0;
fault_data1_reg = GEN8_FAULT_TLB_DATA1;
}
fault = intel_uncore_read(uncore, fault_reg);
if (fault & RING_FAULT_VALID) {
u32 fault_data0, fault_data1;
u64 fault_addr;
fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
gt_dbg(gt, "Unexpected fault\n"
"\tAddr: 0x%08x_%08x\n"
"\tAddress space: %s\n"
"\tEngine ID: %d\n"
"\tSource ID: %d\n"
"\tType: %d\n",
upper_32_bits(fault_addr), lower_32_bits(fault_addr),
fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
GEN8_RING_FAULT_ENGINE_ID(fault),
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
}
}
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
xehp_check_faults(gt);
else if (GRAPHICS_VER(i915) >= 8)
gen8_check_faults(gt);
else if (GRAPHICS_VER(i915) >= 6)
gen6_check_faults(gt);
else
return;
intel_gt_clear_error_registers(gt, ALL_ENGINES);
}
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
/*
* No actual flushing is required for the GTT write domain for reads
* from the GTT domain. Writes to it "immediately" go to main memory
* as far as we know, so there's no chipset flush. It also doesn't
* land in the GPU render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
* the GATT itself.
*
* We also have to wait a bit for the writes to land from the GTT.
* An uncached read (i.e. mmio) seems to be ideal for the round-trip
* timing. This issue has only been observed when switching quickly
* between GTT writes and CPU reads from inside the kernel on recent hw,
* and it appears to only affect discrete GTT blocks (i.e. on LLC
* system agents we cannot reproduce this behaviour, until Cannonlake
* that was!).
*/
wmb();
if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
return;
intel_gt_chipset_flush(gt);
with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
unsigned long flags;
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
RING_HEAD(RENDER_RING_BASE));
spin_unlock_irqrestore(&uncore->lock, flags);
}
}
void intel_gt_chipset_flush(struct intel_gt *gt)
{
wmb();
if (GRAPHICS_VER(gt->i915) < 6)
intel_ggtt_gmch_flush();
}
void intel_gt_driver_register(struct intel_gt *gt)
{
intel_gsc_init(&gt->gsc, gt->i915);
intel_rps_driver_register(&gt->rps);
intel_gt_debugfs_register(gt);
intel_gt_sysfs_register(gt);
}
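/*
 * Allocate and pin a GGTT scratch page for this GT. The fallback order below
 * follows the code as written: device local memory first, then stolen memory
 * (skipped on Meteor Lake, see the Wa_22018444074 annotation), then a plain
 * internal object; the resulting vma is made unshrinkable so it stays put.
 */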
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
obj = i915_gem_object_create_lmem(i915, size,
I915_BO_ALLOC_VOLATILE |
I915_BO_ALLOC_GPU_ONLY);
if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
gt_err(gt, "Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
}
ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
if (ret)
goto err_unref;
gt->scratch = i915_vma_make_unshrinkable(vma);
return 0;
err_unref:
i915_gem_object_put(obj);
return ret;
}
static void intel_gt_fini_scratch(struct intel_gt *gt)
{
i915_vma_unpin_and_release(&gt->scratch, 0);
}
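/*
 * Select the vm installed as gt->vm: platforms with full PPGTT get a private
 * ppgtt of their own, while aliasing/no-PPGTT platforms reuse a reference on
 * the global GTT's vm.
 */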
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
else
return i915_vm_get(&gt->ggtt->vm);
}
static int __engines_record_defaults(struct intel_gt *gt)
{
struct i915_request *requests[I915_NUM_ENGINES] = {};
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* As we reset the gpu during very early sanitisation, the current
* register state on the GPU should reflect its default values.
* We load a context onto the hw (with restore-inhibit), then switch
* over to a second context to save that default register state. We
* can then prime every new context with that state so they all start
* from the same default HW values.
*/
for_each_engine(engine, gt, id) {
struct intel_renderstate so;
struct intel_context *ce;
struct i915_request *rq;
/* We must be able to switch to something! */
GEM_BUG_ON(!engine->kernel_context);
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
err = intel_renderstate_init(&so, ce);
if (err)
goto err;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_fini;
}
err = intel_engine_emit_ctx_wa(rq);
if (err)
goto err_rq;
err = intel_renderstate_emit(&so, rq);
if (err)
goto err_rq;
err_rq:
requests[id] = i915_request_get(rq);
i915_request_add(rq);
err_fini:
intel_renderstate_fini(&so, ce);
err:
if (err) {
intel_context_put(ce);
goto out;
}
}
/* Flush the default context image to memory, and enable powersaving. */
if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
err = -EIO;
goto out;
}
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct i915_request *rq;
struct file *state;
rq = requests[id];
if (!rq)
continue;
if (rq->fence.error) {
err = -EIO;
goto out;
}
GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
if (!rq->context->state)
continue;
/* Keep a copy of the state's backing pages; free the obj */
state = shmem_create_from_object(rq->context->state->obj);
if (IS_ERR(state)) {
err = PTR_ERR(state);
goto out;
}
rq->engine->default_state = state;
}
out:
/*
* If we have to abandon now, we expect the engines to be idle
* and ready to be torn-down. The quickest way we can accomplish
* this is by declaring ourselves wedged.
*/
if (err)
intel_gt_set_wedged(gt);
for (id = 0; id < ARRAY_SIZE(requests); id++) {
struct intel_context *ce;
struct i915_request *rq;
rq = requests[id];
if (!rq)
continue;
ce = rq->context;
i915_request_put(rq);
intel_context_put(ce);
}
return err;
}
static int __engines_verify_workarounds(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return 0;
for_each_engine(engine, gt, id) {
if (intel_engine_verify_workarounds(engine, "load"))
err = -EIO;
}
/* Flush and restore the kernel context for safety */
if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
err = -EIO;
return err;
}
static void __intel_gt_disable(struct intel_gt *gt)
{
intel_gt_set_wedged_on_fini(gt);
intel_gt_suspend_prepare(gt);
intel_gt_suspend_late(gt);
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
long remaining_timeout;
/* If the device is asleep, we have no requests outstanding */
if (!intel_gt_pm_is_awake(gt))
return 0;
while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
&remaining_timeout)) > 0) {
cond_resched();
if (signal_pending(current))
return -EINTR;
}
if (timeout)
return timeout;
if (remaining_timeout < 0)
remaining_timeout = 0;
return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}
int intel_gt_init(struct intel_gt *gt)
{
int err;
err = i915_inject_probe_error(gt->i915, -ENODEV);
if (err)
return err;
intel_gt_init_workarounds(gt);
/*
* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If
* we hold the forcewake during initialisation these problems
* just magically go away.
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
err = intel_gt_init_scratch(gt,
GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
if (err)
goto out_fw;
intel_gt_pm_init(gt);
gt->vm = kernel_vm(gt);
if (!gt->vm) {
err = -ENOMEM;
goto err_pm;
}
intel_set_mocs_index(gt);
err = intel_engines_init(gt);
if (err)
goto err_engines;
err = intel_uc_init(&gt->uc);
if (err)
goto err_engines;
err = intel_gt_resume(gt);
if (err)
goto err_uc_init;
err = intel_gt_init_hwconfig(gt);
if (err)
gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err));
err = __engines_record_defaults(gt);
if (err)
goto err_gt;
err = __engines_verify_workarounds(gt);
if (err)
goto err_gt;
err = i915_inject_probe_error(gt->i915, -EIO);
if (err)
goto err_gt;
intel_uc_init_late(&gt->uc);
intel_migrate_init(&gt->migrate, gt);
goto out_fw;
err_gt:
__intel_gt_disable(gt);
intel_uc_fini_hw(&gt->uc);
err_uc_init:
intel_uc_fini(&gt->uc);
err_engines:
intel_engines_release(gt);
i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
out_fw:
if (err)
intel_gt_set_wedged_on_init(gt);
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return err;
}
void intel_gt_driver_remove(struct intel_gt *gt)
{
__intel_gt_disable(gt);
intel_migrate_fini(&gt->migrate);
intel_uc_driver_remove(&gt->uc);
intel_engines_release(gt);
intel_gt_flush_buffer_pool(gt);
}
void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
intel_gt_sysfs_unregister(gt);
intel_rps_driver_unregister(&gt->rps);
intel_gsc_fini(&gt->gsc);
/*
* If we unload the driver and wedge before the GSC worker is complete,
* the worker will hit an error on its submission to the GSC engine and
* then exit. This is hard to hit for a user, but it is reproducible
* with skipping selftests. The error is handled gracefully by the
* worker, so there are no functional issues, but we still end up with
* an error message in dmesg, which is something we want to avoid as
* this is a supported scenario. We could modify the worker to better
* handle a wedging occurring during its execution, but that gets
* complicated for a couple of reasons:
* - We do want the error on runtime wedging, because there are
* implications for subsystems outside of GT (i.e., PXP, HDCP), it's
* only the error on driver unload that we want to silence.
* - The worker is responsible for multiple submissions (GSC FW load,
* HuC auth, SW proxy), so all of those will have to be adapted to
* handle the wedged_on_fini scenario.
* Therefore, it's much simpler to just wait for the worker to be done
* before wedging on driver removal, also considering that the worker
* will likely already be idle in the great majority of non-selftest
* scenarios.
*/
intel_gsc_uc_flush_work(&gt->uc.gsc);
/*
* Upon unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
intel_gt_set_wedged_on_fini(gt);
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
__intel_gt_reset(gt, ALL_ENGINES);
}
void intel_gt_driver_release(struct intel_gt *gt)
{
struct i915_address_space *vm;
vm = fetch_and_zero(&gt->vm);
if (vm) /* FIXME being called twice on error paths :( */
i915_vm_put(vm);
intel_wa_list_free(&gt->wa_list);
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
intel_gt_fini_buffer_pool(gt);
intel_gt_fini_hwconfig(gt);
}
void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
struct intel_gt *gt;
unsigned int id;
/* We need to wait for inflight RCU frees to release their grip */
rcu_barrier();
for_each_gt(gt, i915, id) {
intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
intel_gt_fini_timelines(gt);
intel_gt_fini_tlb(gt);
intel_engines_free(gt);
}
}
static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
int ret;
if (!gt_is_root(gt)) {
struct intel_uncore *uncore;
spinlock_t *irq_lock;
uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
if (!uncore)
return -ENOMEM;
irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
if (!irq_lock)
return -ENOMEM;
gt->uncore = uncore;
gt->irq_lock = irq_lock;
intel_gt_common_init_early(gt);
}
intel_uncore_init_early(gt->uncore, gt);
ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
if (ret)
return ret;
gt->phys_addr = phys_addr;
return 0;
}
int intel_gt_probe_all(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_gt *gt = to_gt(i915);
const struct intel_gt_definition *gtdef;
phys_addr_t phys_addr;
unsigned int mmio_bar;
unsigned int i;
int ret;
mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
phys_addr = pci_resource_start(pdev, mmio_bar);
/*
* We always have at least one primary GT on any device
* and it has already been initialized early during probe
* in i915_driver_probe()
*/
gt->i915 = i915;
gt->name = "Primary GT";
gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;
gt_dbg(gt, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
if (ret)
return ret;
i915->gt[0] = gt;
if (!HAS_EXTRA_GT_LIST(i915))
return 0;
for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
gtdef->name != NULL;
i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
if (!gt) {
ret = -ENOMEM;
goto err;
}
gt->i915 = i915;
gt->name = gtdef->name;
gt->type = gtdef->type;
gt->info.engine_mask = gtdef->engine_mask;
gt->info.id = i;
gt_dbg(gt, "Setting up %s\n", gt->name);
if (GEM_WARN_ON(range_overflows_t(resource_size_t,
gtdef->mapping_base,
SZ_16M,
pci_resource_len(pdev, mmio_bar)))) {
ret = -ENODEV;
goto err;
}
switch (gtdef->type) {
case GT_TILE:
ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
break;
case GT_MEDIA:
ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
gtdef->gsi_offset);
break;
case GT_PRIMARY:
/* Primary GT should not appear in extra GT list */
default:
MISSING_CASE(gtdef->type);
ret = -ENODEV;
}
if (ret)
goto err;
i915->gt[i] = gt;
}
return 0;
err:
i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
intel_gt_release_all(i915);
return ret;
}
int intel_gt_tiles_init(struct drm_i915_private *i915)
{
struct intel_gt *gt;
unsigned int id;
int ret;
for_each_gt(gt, i915, id) {
ret = intel_gt_probe_lmem(gt);
if (ret)
return ret;
}
return 0;
}
void intel_gt_release_all(struct drm_i915_private *i915)
{
struct intel_gt *gt;
unsigned int id;
for_each_gt(gt, i915, id)
i915->gt[id] = NULL;
}
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p)
{
drm_printf(p, "available engines: %x\n", info->engine_mask);
intel_sseu_dump(&info->sseu, p);
}
enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
struct drm_i915_gem_object *obj,
bool always_coherent)
{
/*
* Wa_22016122933: always return I915_MAP_WC for Media
* version 13.0 when the object is on the Media GT
*/
if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
return I915_MAP_WC;
if (HAS_LLC(gt->i915) || always_coherent)
return I915_MAP_WB;
else
return I915_MAP_WC;
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "selftest_llc.h"
#include "intel_rps.h"
static int gen6_verify_ring_freq(struct intel_llc *llc)
{
struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct ia_constants consts;
intel_wakeref_t wakeref;
unsigned int gpu_freq;
int err = 0;
wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
if (!get_ia_constants(llc, &consts))
goto out_rpm;
for (gpu_freq = consts.min_gpu_freq;
gpu_freq <= consts.max_gpu_freq;
gpu_freq++) {
struct intel_rps *rps = &llc_to_gt(llc)->rps;
unsigned int ia_freq, ring_freq, found;
u32 val;
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
val = gpu_freq;
if (snb_pcode_read(llc_to_gt(llc)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&val, NULL)) {
pr_err("Failed to read freq table[%d], range [%d, %d]\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
err = -ENXIO;
break;
}
found = (val >> 0) & 0xff;
if (found != ia_freq) {
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
found, ia_freq);
err = -EINVAL;
break;
}
found = (val >> 8) & 0xff;
if (found != ring_freq) {
pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
intel_gpu_freq(rps, gpu_freq * (GRAPHICS_VER(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
found, ring_freq);
err = -EINVAL;
break;
}
}
out_rpm:
intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref);
return err;
}
int st_llc_verify(struct intel_llc *llc)
{
return gen6_verify_ring_freq(llc);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_llc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/log2.h>
#include "gem/i915_gem_lmem.h"
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
static u64 gen8_pde_encode(const dma_addr_t addr,
const enum i915_cache_level level)
{
u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE;
else
pde |= PPAT_UNCACHED;
return pde;
}
static u64 gen8_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (unlikely(flags & PTE_READ_ONLY))
pte &= ~GEN8_PAGE_RW;
/*
* For pre-gen12 platforms pat_index is the same as enum
* i915_cache_level, so the switch-case here is still valid.
* See translation table defined by LEGACY_CACHELEVEL.
*/
switch (pat_index) {
case I915_CACHE_NONE:
pte |= PPAT_UNCACHED;
break;
case I915_CACHE_WT:
pte |= PPAT_DISPLAY_ELLC;
break;
default:
pte |= PPAT_CACHED;
break;
}
return pte;
}
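/*
 * Gen12+ PTEs select caching behaviour via a PAT index rather than the legacy
 * PPAT encoding used above: the low four bits of pat_index are copied into
 * the PTE's PAT bits (bit 3 via the MTL_PPGTT_PTE_PAT3 position) and PTE_LM
 * marks pages placed in device local memory.
 */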
static u64 gen12_pte_encode(dma_addr_t addr,
unsigned int pat_index,
u32 flags)
{
gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
if (unlikely(flags & PTE_READ_ONLY))
pte &= ~GEN8_PAGE_RW;
if (flags & PTE_LM)
pte |= GEN12_PPGTT_PTE_LM;
if (pat_index & BIT(0))
pte |= GEN12_PPGTT_PTE_PAT0;
if (pat_index & BIT(1))
pte |= GEN12_PPGTT_PTE_PAT1;
if (pat_index & BIT(2))
pte |= GEN12_PPGTT_PTE_PAT2;
if (pat_index & BIT(3))
pte |= MTL_PPGTT_PTE_PAT3;
return pte;
}
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *i915 = ppgtt->vm.i915;
struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
enum vgt_g2v_type msg;
int i;
if (create)
atomic_inc(px_used(ppgtt->pd)); /* never remove */
else
atomic_dec(px_used(ppgtt->pd));
mutex_lock(&i915->vgpu.lock);
if (i915_vm_is_4lvl(&ppgtt->vm)) {
const u64 daddr = px_dma(ppgtt->pd);
intel_uncore_write(uncore,
vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
intel_uncore_write(uncore,
vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
msg = create ?
VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
} else {
for (i = 0; i < GEN8_3LVL_PDPES; i++) {
const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
intel_uncore_write(uncore,
vgtif_reg(pdp[i].lo),
lower_32_bits(daddr));
intel_uncore_write(uncore,
vgtif_reg(pdp[i].hi),
upper_32_bits(daddr));
}
msg = create ?
VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
}
/* g2v_notify atomically (via hv trap) consumes the message packet. */
intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);
mutex_unlock(&i915->vgpu.lock);
}
/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
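/*
 * Illustrative example of the index math above: each level consumes
 * ilog2(GEN8_PDES) = 9 address bits on top of the 4K page offset, so
 * __gen8_pte_shift(0..3) = 12, 21, 30, 39. For a (hypothetical) address of
 * 0x0000008040201000, every level then resolves to index 1, i.e.
 * __gen8_pte_index(addr, lvl) == 1 for lvl = 0..3.
 */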
static unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
const int shift = gen8_pd_shift(lvl);
const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
GEM_BUG_ON(start >= end);
end += ~mask >> gen8_pd_shift(1);
*idx = i915_pde_index(start, shift);
if ((start ^ end) & mask)
return GEN8_PDES - *idx;
else
return i915_pde_index(end, shift) - *idx;
}
static bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
GEM_BUG_ON(start >= end);
return (start ^ end) & mask && (start & ~mask) == 0;
}
static unsigned int gen8_pt_count(u64 start, u64 end)
{
GEM_BUG_ON(start >= end);
if ((start ^ end) >> gen8_pd_shift(1))
return GEN8_PDES - (start & (GEN8_PDES - 1));
else
return end - start;
}
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
{
unsigned int shift = __gen8_pte_shift(vm->top);
return (vm->total + (1ull << shift) - 1) >> shift;
}
static struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
if (vm->top == 2)
return ppgtt->pd;
else
return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}
static struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
struct i915_page_directory *pd,
int count, int lvl)
{
if (lvl) {
void **pde = pd->entry;
do {
if (!*pde)
continue;
__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
} while (pde++, --count);
}
free_px(vm, &pd->pt, lvl);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
if (ppgtt->pd)
__gen8_ppgtt_cleanup(vm, ppgtt->pd,
gen8_pd_top_count(vm), vm->top);
free_scratch(vm);
}
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
struct i915_page_directory * const pd,
u64 start, const u64 end, int lvl)
{
const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
unsigned int idx, len;
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
len = gen8_pd_range(start, end, lvl--, &idx);
DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
__func__, vm, lvl + 1, start, end,
idx, len, atomic_read(px_used(pd)));
GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
do {
struct i915_page_table *pt = pd->entry[idx];
if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
gen8_pd_contains(start, end, lvl)) {
DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
__func__, vm, lvl + 1, idx, start, end);
clear_pd_entry(pd, idx, scratch);
__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
start += (u64)I915_PDES << gen8_pd_shift(lvl);
continue;
}
if (lvl) {
start = __gen8_ppgtt_clear(vm, as_pd(pt),
start, end, lvl);
} else {
unsigned int count;
unsigned int pte = gen8_pd_index(start, 0);
unsigned int num_ptes;
u64 *vaddr;
count = gen8_pt_count(start, end);
DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
__func__, vm, lvl, start, end,
gen8_pd_index(start, 0), count,
atomic_read(&pt->used));
GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
num_ptes = count;
if (pt->is_compact) {
GEM_BUG_ON(num_ptes % 16);
GEM_BUG_ON(pte % 16);
num_ptes /= 16;
pte /= 16;
}
vaddr = px_vaddr(pt);
memset64(vaddr + pte,
vm->scratch[0]->encode,
num_ptes);
atomic_sub(count, &pt->used);
start += count;
}
if (release_pd_entry(pd, idx, pt, scratch))
free_px(vm, pt, lvl);
} while (idx++, --len);
return start;
}
static void gen8_ppgtt_clear(struct i915_address_space *vm,
u64 start, u64 length)
{
GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(range_overflows(start, length, vm->total));
start >>= GEN8_PTE_SHIFT;
length >>= GEN8_PTE_SHIFT;
GEM_BUG_ON(length == 0);
__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
start, start + length, vm->top);
}
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
struct i915_vm_pt_stash *stash,
struct i915_page_directory * const pd,
u64 * const start, const u64 end, int lvl)
{
unsigned int idx, len;
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
len = gen8_pd_range(*start, end, lvl--, &idx);
DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
__func__, vm, lvl + 1, *start, end,
idx, len, atomic_read(px_used(pd)));
GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
spin_lock(&pd->lock);
GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
do {
struct i915_page_table *pt = pd->entry[idx];
if (!pt) {
spin_unlock(&pd->lock);
DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
__func__, vm, lvl + 1, idx);
pt = stash->pt[!!lvl];
__i915_gem_object_pin_pages(pt->base);
fill_px(pt, vm->scratch[lvl]->encode);
spin_lock(&pd->lock);
if (likely(!pd->entry[idx])) {
stash->pt[!!lvl] = pt->stash;
atomic_set(&pt->used, 0);
set_pd_entry(pd, idx, pt);
} else {
pt = pd->entry[idx];
}
}
if (lvl) {
atomic_inc(&pt->used);
spin_unlock(&pd->lock);
__gen8_ppgtt_alloc(vm, stash,
as_pd(pt), start, end, lvl);
spin_lock(&pd->lock);
atomic_dec(&pt->used);
GEM_BUG_ON(!atomic_read(&pt->used));
} else {
unsigned int count = gen8_pt_count(*start, end);
DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
__func__, vm, lvl, *start, end,
gen8_pd_index(*start, 0), count,
atomic_read(&pt->used));
atomic_add(count, &pt->used);
/* All other pdes may be simultaneously removed */
GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
*start += count;
}
} while (idx++, --len);
spin_unlock(&pd->lock);
}
static void gen8_ppgtt_alloc(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
u64 start, u64 length)
{
GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
GEM_BUG_ON(range_overflows(start, length, vm->total));
start >>= GEN8_PTE_SHIFT;
length >>= GEN8_PTE_SHIFT;
GEM_BUG_ON(length == 0);
__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
&start, start + length, vm->top);
}
static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
struct i915_page_directory *pd,
u64 *start, u64 end, int lvl,
void (*fn)(struct i915_address_space *vm,
struct i915_page_table *pt,
void *data),
void *data)
{
unsigned int idx, len;
len = gen8_pd_range(*start, end, lvl--, &idx);
spin_lock(&pd->lock);
do {
struct i915_page_table *pt = pd->entry[idx];
atomic_inc(&pt->used);
spin_unlock(&pd->lock);
if (lvl) {
__gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
fn, data);
} else {
fn(vm, pt, data);
*start += gen8_pt_count(*start, end);
}
spin_lock(&pd->lock);
atomic_dec(&pt->used);
} while (idx++, --len);
spin_unlock(&pd->lock);
}
static void gen8_ppgtt_foreach(struct i915_address_space *vm,
u64 start, u64 length,
void (*fn)(struct i915_address_space *vm,
struct i915_page_table *pt,
void *data),
void *data)
{
start >>= GEN8_PTE_SHIFT;
length >>= GEN8_PTE_SHIFT;
__gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
&start, start + length, vm->top,
fn, data);
}
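/*
 * Write out PTEs for the pages described by *iter, starting at page index
 * idx. The return value is the index to resume from, or 0 once the
 * scatterlist is exhausted; each completed page of PTEs is passed through
 * drm_clflush_virt_range() so the CPU writes reach memory before the GPU
 * walks the tables.
 */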
static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
struct i915_page_directory *pdp,
struct sgt_dma *iter,
u64 idx,
unsigned int pat_index,
u32 flags)
{
struct i915_page_directory *pd;
const gen8_pte_t pte_encode = ppgtt->vm.pte_encode(0, pat_index, flags);
gen8_pte_t *vaddr;
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
iter->sg = __sg_next(iter->sg);
if (!iter->sg || sg_dma_len(iter->sg) == 0) {
idx = 0;
break;
}
iter->dma = sg_dma_address(iter->sg);
iter->max = iter->dma + sg_dma_len(iter->sg);
}
if (gen8_pd_index(++idx, 0) == 0) {
if (gen8_pd_index(idx, 1) == 0) {
/* Limited by sg length for 3lvl */
if (gen8_pd_index(idx, 2) == 0)
break;
pd = pdp->entry[gen8_pd_index(idx, 2)];
}
drm_clflush_virt_range(vaddr, PAGE_SIZE);
vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
return idx;
}
static void
xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
struct sgt_dma *iter,
unsigned int pat_index,
u32 flags)
{
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
unsigned int rem = sg_dma_len(iter->sg);
u64 start = vma_res->start;
u64 end = start + vma_res->vma_size;
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
do {
struct i915_page_directory * const pdp =
gen8_pdp_for_page_address(vm, start);
struct i915_page_directory * const pd =
i915_pd_entry(pdp, __gen8_pte_index(start, 2));
struct i915_page_table *pt =
i915_pt_entry(pd, __gen8_pte_index(start, 1));
gen8_pte_t encode = pte_encode;
unsigned int page_size;
gen8_pte_t *vaddr;
u16 index, max, nent, i;
max = I915_PDES;
nent = 1;
if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
rem >= I915_GTT_PAGE_SIZE_2M &&
!__gen8_pte_index(start, 0)) {
index = __gen8_pte_index(start, 1);
encode |= GEN8_PDE_PS_2M;
page_size = I915_GTT_PAGE_SIZE_2M;
vaddr = px_vaddr(pd);
} else {
index = __gen8_pte_index(start, 0);
page_size = I915_GTT_PAGE_SIZE;
if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
/*
* Device local-memory on these platforms should
* always use 64K pages or larger (including GTT
* alignment), therefore if we know the whole
* page-table needs to be filled we can always
* safely use the compact-layout. Otherwise fall
* back to the TLB hint with PS64. If this is
* system memory we only bother with PS64.
*/
if ((encode & GEN12_PPGTT_PTE_LM) &&
end - start >= SZ_2M && !index) {
index = __gen8_pte_index(start, 0) / 16;
page_size = I915_GTT_PAGE_SIZE_64K;
max /= 16;
vaddr = px_vaddr(pd);
vaddr[__gen8_pte_index(start, 1)] |= GEN12_PDE_64K;
pt->is_compact = true;
} else if (IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
rem >= I915_GTT_PAGE_SIZE_64K &&
!(index % 16)) {
encode |= GEN12_PTE_PS64;
page_size = I915_GTT_PAGE_SIZE_64K;
nent = 16;
}
}
vaddr = px_vaddr(pt);
}
do {
GEM_BUG_ON(rem < page_size);
for (i = 0; i < nent; i++) {
vaddr[index++] =
encode | (iter->dma + i *
I915_GTT_PAGE_SIZE);
}
start += page_size;
iter->dma += page_size;
rem -= page_size;
if (iter->dma >= iter->max) {
iter->sg = __sg_next(iter->sg);
if (!iter->sg)
break;
rem = sg_dma_len(iter->sg);
if (!rem)
break;
iter->dma = sg_dma_address(iter->sg);
iter->max = iter->dma + rem;
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
break;
}
} while (rem >= page_size && index < max);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
vma_res->page_sizes_gtt |= page_size;
} while (iter->sg && sg_dma_len(iter->sg));
}
static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
struct sgt_dma *iter,
unsigned int pat_index,
u32 flags)
{
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
unsigned int rem = sg_dma_len(iter->sg);
u64 start = vma_res->start;
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
do {
struct i915_page_directory * const pdp =
gen8_pdp_for_page_address(vm, start);
struct i915_page_directory * const pd =
i915_pd_entry(pdp, __gen8_pte_index(start, 2));
gen8_pte_t encode = pte_encode;
unsigned int maybe_64K = -1;
unsigned int page_size;
gen8_pte_t *vaddr;
u16 index;
if (vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
rem >= I915_GTT_PAGE_SIZE_2M &&
!__gen8_pte_index(start, 0)) {
index = __gen8_pte_index(start, 1);
encode |= GEN8_PDE_PS_2M;
page_size = I915_GTT_PAGE_SIZE_2M;
vaddr = px_vaddr(pd);
} else {
struct i915_page_table *pt =
i915_pt_entry(pd, __gen8_pte_index(start, 1));
index = __gen8_pte_index(start, 0);
page_size = I915_GTT_PAGE_SIZE;
if (!index &&
vma_res->bi.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
maybe_64K = __gen8_pte_index(start, 1);
vaddr = px_vaddr(pt);
}
do {
GEM_BUG_ON(sg_dma_len(iter->sg) < page_size);
vaddr[index++] = encode | iter->dma;
start += page_size;
iter->dma += page_size;
rem -= page_size;
if (iter->dma >= iter->max) {
iter->sg = __sg_next(iter->sg);
if (!iter->sg)
break;
rem = sg_dma_len(iter->sg);
if (!rem)
break;
iter->dma = sg_dma_address(iter->sg);
iter->max = iter->dma + rem;
if (maybe_64K != -1 && index < I915_PDES &&
!(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
maybe_64K = -1;
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
break;
}
} while (rem >= page_size && index < I915_PDES);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
* filled the whole page-table with 64K entries, or filled part of
* it, reached the end of the sg table, and have
* enough padding.
*/
if (maybe_64K != -1 &&
(index == I915_PDES ||
(i915_vm_has_scratch_64K(vm) &&
!iter->sg && IS_ALIGNED(vma_res->start +
vma_res->node_size,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
drm_clflush_virt_range(vaddr, PAGE_SIZE);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
* We write all 4K page entries, even when using 64K
* pages. In order to verify that the HW isn't cheating
* by using the 4K PTE instead of the 64K PTE, we want
* to remove all the surplus entries. If the HW skipped
* the 64K PTE, it will read/write into the scratch page
* instead - which we detect as missing results during
* selftests.
*/
if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
u16 i;
encode = vm->scratch[0]->encode;
vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
}
vma_res->page_sizes_gtt |= page_size;
} while (iter->sg && sg_dma_len(iter->sg));
}
static void gen8_ppgtt_insert(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma_res);
if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50))
xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
else
gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
} else {
u64 idx = vma_res->start >> GEN8_PTE_SHIFT;
do {
struct i915_page_directory * const pdp =
gen8_pdp_for_page_index(vm, idx);
idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
pat_index, flags);
} while (idx);
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
}
static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
unsigned int pat_index,
u32 flags)
{
u64 idx = offset >> GEN8_PTE_SHIFT;
struct i915_page_directory * const pdp =
gen8_pdp_for_page_index(vm, idx);
struct i915_page_directory *pd =
i915_pd_entry(pdp, gen8_pd_index(idx, 2));
struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
gen8_pte_t *vaddr;
GEM_BUG_ON(pt->is_compact);
vaddr = px_vaddr(pt);
vaddr[gen8_pd_index(idx, 0)] = vm->pte_encode(addr, pat_index, flags);
drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
unsigned int pat_index,
u32 flags)
{
u64 idx = offset >> GEN8_PTE_SHIFT;
struct i915_page_directory * const pdp =
gen8_pdp_for_page_index(vm, idx);
struct i915_page_directory *pd =
i915_pd_entry(pdp, gen8_pd_index(idx, 2));
struct i915_page_table *pt = i915_pt_entry(pd, gen8_pd_index(idx, 1));
gen8_pte_t *vaddr;
GEM_BUG_ON(!IS_ALIGNED(addr, SZ_64K));
GEM_BUG_ON(!IS_ALIGNED(offset, SZ_64K));
/* XXX: we don't strictly need to use this layout */
if (!pt->is_compact) {
vaddr = px_vaddr(pd);
vaddr[gen8_pd_index(idx, 1)] |= GEN12_PDE_64K;
pt->is_compact = true;
}
vaddr = px_vaddr(pt);
vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
}
static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
unsigned int pat_index,
u32 flags)
{
if (flags & PTE_LM)
return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset,
pat_index, flags);
return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
u32 pte_flags;
int ret;
int i;
/*
* If everybody agrees not to write into the scratch page,
* we can reuse it for all vm, keeping contexts and processes separate.
*/
if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
struct i915_address_space *clone = vm->gt->vm;
GEM_BUG_ON(!clone->has_read_only);
vm->scratch_order = clone->scratch_order;
for (i = 0; i <= vm->top; i++)
vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);
return 0;
}
ret = setup_scratch_page(vm);
if (ret)
return ret;
pte_flags = vm->has_read_only;
if (i915_gem_object_is_lmem(vm->scratch[0]))
pte_flags |= PTE_LM;
vm->scratch[0]->encode =
vm->pte_encode(px_dma(vm->scratch[0]),
i915_gem_get_pat_index(vm->i915,
I915_CACHE_NONE),
pte_flags);
for (i = 1; i <= vm->top; i++) {
struct drm_i915_gem_object *obj;
obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto free_scratch;
}
ret = map_pt_dma(vm, obj);
if (ret) {
i915_gem_object_put(obj);
goto free_scratch;
}
fill_px(obj, vm->scratch[i - 1]->encode);
obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE);
vm->scratch[i] = obj;
}
return 0;
free_scratch:
while (i--)
i915_gem_object_put(vm->scratch[i]);
vm->scratch[0] = NULL;
return ret;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
struct i915_page_directory *pd = ppgtt->pd;
unsigned int idx;
GEM_BUG_ON(vm->top != 2);
GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
struct i915_page_directory *pde;
int err;
pde = alloc_pd(vm);
if (IS_ERR(pde))
return PTR_ERR(pde);
err = map_pt_dma(vm, pde->pt.base);
if (err) {
free_pd(vm, pde);
return err;
}
fill_px(pde, vm->scratch[1]->encode);
set_pd_entry(pd, idx, pde);
atomic_inc(px_used(pde)); /* keep pinned */
}
wmb();
return 0;
}
static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
const unsigned int count = gen8_pd_top_count(vm);
struct i915_page_directory *pd;
int err;
GEM_BUG_ON(count > I915_PDES);
pd = __alloc_pd(count);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
if (IS_ERR(pd->pt.base)) {
err = PTR_ERR(pd->pt.base);
pd->pt.base = NULL;
goto err_pd;
}
err = map_pt_dma(vm, pd->pt.base);
if (err)
goto err_pd;
fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
atomic_inc(px_used(pd)); /* mark as pinned */
return pd;
err_pd:
free_pd(vm, pd);
return ERR_PTR(err);
}
/*
* GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
* registers with a net effect resembling a 2-level page table in normal x86
* terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
* legacy 32b address space.
*
*/
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
struct i915_page_directory *pd;
struct i915_ppgtt *ppgtt;
int err;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ERR_PTR(-ENOMEM);
ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));
/*
* From bdw, there is hw support for read-only pages in the PPGTT.
*
* Gen11 has HSDES#:1807136187 unresolved. Disable ro support
* for now.
*
* Gen12 has inherited the same read-only fault issue from gen11.
*/
ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
if (HAS_LMEM(gt->i915))
ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
else
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
/*
* Using SMEM here instead of LMEM has the advantage of not reserving
* high performance memory for a "never" used filler page. It also
* removes the device access that would be required to initialise the
* scratch page, reducing pressure on an even scarcer resource.
*/
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
if (GRAPHICS_VER(gt->i915) >= 12)
ppgtt->vm.pte_encode = gen12_pte_encode;
else
ppgtt->vm.pte_encode = gen8_pte_encode;
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
if (HAS_64K_PAGES(gt->i915))
ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry;
else
ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
ppgtt->vm.foreach = gen8_ppgtt_foreach;
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
err = gen8_init_scratch(&ppgtt->vm);
if (err)
goto err_put;
pd = gen8_alloc_top_pd(&ppgtt->vm);
if (IS_ERR(pd)) {
err = PTR_ERR(pd);
goto err_put;
}
ppgtt->pd = pd;
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
if (err)
goto err_put;
}
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
return ppgtt;
err_put:
i915_vm_put(&ppgtt->vm);
return ERR_PTR(err);
}
| linux-master | drivers/gpu/drm/i915/gt/gen8_ppgtt.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2017-2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
#include <linux/string_helpers.h>
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_ring.h"
#include "selftest_engine_heartbeat.h"
#include "../selftests/i915_random.h"
#include "../i915_selftest.h"
#include "selftests/igt_flush_test.h"
#include "selftests/lib_sw_fence.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_timeline.h"
static struct page *hwsp_page(struct intel_timeline *tl)
{
struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
return sg_page(obj->mm.pages->sgl);
}
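/*
 * Reduce a timeline's HWSP slot to a single number (kernel address of the
 * backing page plus the slot offset, divided by TIMELINE_SEQNO_BYTES) so the
 * mock selftest can use it as a radix-tree key and detect two live timelines
 * sharing the same slot.
 */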
static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
unsigned long address = (unsigned long)page_address(hwsp_page(tl));
return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES;
}
static int selftest_tl_pin(struct intel_timeline *tl)
{
struct i915_gem_ww_ctx ww;
int err;
i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww);
if (!err)
err = intel_timeline_pin(tl, &ww);
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
return err;
}
/* Only half of seqno's are usable, see __intel_timeline_get_seqno() */
#define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2)
struct mock_hwsp_freelist {
struct intel_gt *gt;
struct radix_tree_root cachelines;
struct intel_timeline **history;
unsigned long count, max;
struct rnd_state prng;
};
enum {
SHUFFLE = BIT(0),
};
static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
unsigned int idx,
struct intel_timeline *tl)
{
tl = xchg(&state->history[idx], tl);
if (tl) {
radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
intel_timeline_unpin(tl);
intel_timeline_put(tl);
}
}
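/*
 * Create 'count' pinned timelines, recording each one in a rolling history
 * and inserting its HWSP slot into the radix tree; -EEXIST on insertion means
 * two live timelines were handed the same slot, which is the bug this mock
 * test is hunting. A random number of entries (optionally shuffled first) is
 * then released again to exercise the freelist.
 */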
static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned int count,
unsigned int flags)
{
struct intel_timeline *tl;
unsigned int idx;
while (count--) {
unsigned long cacheline;
int err;
tl = intel_timeline_create(state->gt);
if (IS_ERR(tl))
return PTR_ERR(tl);
err = selftest_tl_pin(tl);
if (err) {
intel_timeline_put(tl);
return err;
}
cacheline = hwsp_cacheline(tl);
err = radix_tree_insert(&state->cachelines, cacheline, tl);
if (err) {
if (err == -EEXIST) {
pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
cacheline);
}
intel_timeline_unpin(tl);
intel_timeline_put(tl);
return err;
}
idx = state->count++ % state->max;
__mock_hwsp_record(state, idx, tl);
}
if (flags & SHUFFLE)
i915_prandom_shuffle(state->history,
sizeof(*state->history),
min(state->count, state->max),
&state->prng);
count = i915_prandom_u32_max_state(min(state->count, state->max),
&state->prng);
while (count--) {
idx = --state->count % state->max;
__mock_hwsp_record(state, idx, NULL);
}
return 0;
}
static int mock_hwsp_freelist(void *arg)
{
struct mock_hwsp_freelist state;
struct drm_i915_private *i915;
const struct {
const char *name;
unsigned int flags;
} phases[] = {
{ "linear", 0 },
{ "shuffled", SHUFFLE },
{ },
}, *p;
unsigned int na;
int err = 0;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
state.gt = to_gt(i915);
/*
* Create a bunch of timelines and check that their HWSP do not overlap.
* Free some, and try again.
*/
state.max = PAGE_SIZE / sizeof(*state.history);
state.count = 0;
state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
if (!state.history) {
err = -ENOMEM;
goto err_put;
}
for (p = phases; p->name; p++) {
pr_debug("%s(%s)\n", __func__, p->name);
for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
err = __mock_hwsp_timeline(&state, na, p->flags);
if (err)
goto out;
}
}
out:
for (na = 0; na < state.max; na++)
__mock_hwsp_record(&state, na, NULL);
kfree(state.history);
err_put:
mock_destroy_device(i915);
return err;
}
struct __igt_sync {
const char *name;
u32 seqno;
bool expected;
bool set;
};
static int __igt_sync(struct intel_timeline *tl,
u64 ctx,
const struct __igt_sync *p,
const char *name)
{
int ret;
if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
name, p->name, ctx, p->seqno, str_yes_no(p->expected));
return -EINVAL;
}
if (p->set) {
ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
if (ret)
return ret;
}
return 0;
}
static int igt_sync(void *arg)
{
const struct __igt_sync pass[] = {
{ "unset", 0, false, false },
{ "new", 0, false, true },
{ "0a", 0, true, true },
{ "1a", 1, false, true },
{ "1b", 1, true, true },
{ "0b", 0, true, false },
{ "2a", 2, false, true },
{ "4", 4, false, true },
{ "INT_MAX", INT_MAX, false, true },
{ "INT_MAX-1", INT_MAX-1, true, false },
{ "INT_MAX+1", (u32)INT_MAX+1, false, true },
{ "INT_MAX", INT_MAX, true, false },
{ "UINT_MAX", UINT_MAX, false, true },
{ "wrap", 0, false, true },
{ "unwrap", UINT_MAX, true, false },
{},
}, *p;
struct intel_timeline tl;
int order, offset;
int ret = -ENODEV;
mock_timeline_init(&tl, 0);
for (p = pass; p->name; p++) {
for (order = 1; order < 64; order++) {
for (offset = -1; offset <= (order > 1); offset++) {
u64 ctx = BIT_ULL(order) + offset;
ret = __igt_sync(&tl, ctx, p, "1");
if (ret)
goto out;
}
}
}
mock_timeline_fini(&tl);
mock_timeline_init(&tl, 0);
for (order = 1; order < 64; order++) {
for (offset = -1; offset <= (order > 1); offset++) {
u64 ctx = BIT_ULL(order) + offset;
for (p = pass; p->name; p++) {
ret = __igt_sync(&tl, ctx, p, "2");
if (ret)
goto out;
}
}
}
out:
mock_timeline_fini(&tl);
return ret;
}
static unsigned int random_engine(struct rnd_state *rnd)
{
return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
}
static int bench_sync(void *arg)
{
struct rnd_state prng;
struct intel_timeline tl;
unsigned long end_time, count;
u64 prng32_1M;
ktime_t kt;
int order, last_order;
mock_timeline_init(&tl, 0);
/* Lookups from cache are very fast and so the random number generation
* and the loop itself become a significant factor in the per-iteration
* timings. We try to compensate by measuring the overhead of the prng
* and subtracting it from the reported results.
*/
prandom_seed_state(&prng, i915_selftest.random_seed);
count = 0;
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
u32 x;
/* Make sure the compiler doesn't optimise away the prng call */
WRITE_ONCE(x, prandom_u32_state(&prng));
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
pr_debug("%s: %lu random evaluations, %lluns/prng\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
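/*
 * prng32_1M is the measured cost of one prandom_u32_state() call in
 * nanoseconds, scaled by 2^20 for fixed-point precision. The later
 * "(count * prng32_1M * 2) >> 20" corrections therefore subtract two 32-bit
 * prng draws per iteration from the reported timings.
 */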
/* Benchmark (only) setting random context ids */
prandom_seed_state(&prng, i915_selftest.random_seed);
count = 0;
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
u64 id = i915_prandom_u64_state(&prng);
__intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
pr_info("%s: %lu random insertions, %lluns/insert\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
/* Benchmark looking up the exact same context ids as we just set */
prandom_seed_state(&prng, i915_selftest.random_seed);
end_time = count;
kt = ktime_get();
while (end_time--) {
u64 id = i915_prandom_u64_state(&prng);
if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
mock_timeline_fini(&tl);
pr_err("Lookup of %llu failed\n", id);
return -EINVAL;
}
}
kt = ktime_sub(ktime_get(), kt);
kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
pr_info("%s: %lu random lookups, %lluns/lookup\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
cond_resched();
mock_timeline_init(&tl, 0);
/* Benchmark setting the first N (in order) contexts */
count = 0;
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
__intel_timeline_sync_set(&tl, count++, 0);
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
pr_info("%s: %lu in-order insertions, %lluns/insert\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
/* Benchmark looking up the exact same context ids as we just set */
end_time = count;
kt = ktime_get();
while (end_time--) {
if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
pr_err("Lookup of %lu failed\n", end_time);
mock_timeline_fini(&tl);
return -EINVAL;
}
}
kt = ktime_sub(ktime_get(), kt);
pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
cond_resched();
mock_timeline_init(&tl, 0);
/* Benchmark searching for a random context id and maybe changing it */
prandom_seed_state(&prng, i915_selftest.random_seed);
count = 0;
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
u32 id = random_engine(&prng);
u32 seqno = prandom_u32_state(&prng);
if (!__intel_timeline_sync_is_later(&tl, id, seqno))
__intel_timeline_sync_set(&tl, id, seqno);
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
__func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
cond_resched();
/* Benchmark searching for a known context id and changing the seqno */
for (last_order = 1, order = 1; order < 32;
({ int tmp = last_order; last_order = order; order += tmp; })) {
unsigned int mask = BIT(order) - 1;
mock_timeline_init(&tl, 0);
count = 0;
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
/* Without assuming too many details of the underlying
* implementation, try to identify its phase-changes
* (if any)!
*/
u64 id = (u64)(count & mask) << order;
__intel_timeline_sync_is_later(&tl, id, 0);
__intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
__func__, count, order,
(long long)div64_ul(ktime_to_ns(kt), count));
mock_timeline_fini(&tl);
cond_resched();
}
return 0;
}
int intel_timeline_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(mock_hwsp_freelist),
SUBTEST(igt_sync),
SUBTEST(bench_sync),
};
return i915_subtests(tests, NULL);
}
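/*
 * Emit a store of 'value' to a GGTT address using the MI_STORE_DWORD_IMM
 * layout each generation expects, as encoded below: gen8+ takes the address
 * as a low dword followed by an upper dword of 0, gen4-7 take a zero dword
 * before the address, and older parts use the MI_MEM_VIRTUAL form with an
 * MI_NOOP to fill the four dwords reserved from the ring.
 */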
static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
{
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (GRAPHICS_VER(rq->i915) >= 8) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = addr;
*cs++ = 0;
*cs++ = value;
} else if (GRAPHICS_VER(rq->i915) >= 4) {
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = 0;
*cs++ = addr;
*cs++ = value;
} else {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cs++ = addr;
*cs++ = value;
*cs++ = MI_NOOP;
}
intel_ring_advance(rq, cs);
return 0;
}
static struct i915_request *
checked_tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
struct i915_request *rq;
int err;
err = selftest_tl_pin(tl);
if (err) {
rq = ERR_PTR(err);
goto out;
}
if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {
pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
*tl->hwsp_seqno, tl->seqno);
intel_timeline_unpin(tl);
return ERR_PTR(-EINVAL);
}
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
goto out_unpin;
i915_request_get(rq);
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
i915_request_add(rq);
if (err) {
i915_request_put(rq);
rq = ERR_PTR(err);
}
out_unpin:
intel_timeline_unpin(tl);
out:
if (IS_ERR(rq))
pr_err("Failed to write to timeline!\n");
return rq;
}
static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long count, n;
int err = 0;
/*
* Create a bunch of timelines and check we can write
* independently to each of their breadcrumb slots.
*/
timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
sizeof(*timelines),
GFP_KERNEL);
if (!timelines)
return -ENOMEM;
count = 0;
for_each_engine(engine, gt, id) {
if (!intel_engine_can_store_dword(engine))
continue;
intel_engine_pm_get(engine);
for (n = 0; n < NUM_TIMELINES; n++) {
struct intel_timeline *tl;
struct i915_request *rq;
tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
break;
}
rq = checked_tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
break;
}
timelines[count++] = tl;
i915_request_put(rq);
}
intel_engine_pm_put(engine);
if (err)
break;
}
if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
struct intel_timeline *tl = timelines[n];
if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
}
kvfree(timelines);
return err;
#undef NUM_TIMELINES
}
static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
struct intel_gt *gt = arg;
struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long count, n;
int err = 0;
/*
* Create a bunch of timelines and check we can write
* independently to each of their breadcrumb slots with adjacent
* engines.
*/
timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
sizeof(*timelines),
GFP_KERNEL);
if (!timelines)
return -ENOMEM;
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
for_each_engine(engine, gt, id) {
struct intel_timeline *tl;
struct i915_request *rq;
if (!intel_engine_can_store_dword(engine))
continue;
tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
}
intel_engine_pm_get(engine);
rq = checked_tl_write(tl, engine, count);
intel_engine_pm_put(engine);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
timelines[count++] = tl;
i915_request_put(rq);
}
}
out:
if (igt_flush_test(gt->i915))
err = -EIO;
for (n = 0; n < count; n++) {
struct intel_timeline *tl = timelines[n];
if (!err && READ_ONCE(*tl->hwsp_seqno) != n) {
GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n",
n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno);
GEM_TRACE_DUMP();
err = -EINVAL;
}
intel_timeline_put(tl);
}
kvfree(timelines);
return err;
#undef NUM_TIMELINES
}
static int live_hwsp_wrap(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct intel_timeline *tl;
enum intel_engine_id id;
int err = 0;
/*
* Across a seqno wrap, we need to keep the old cacheline alive for
* foreign GPU references.
*/
tl = intel_timeline_create(gt);
if (IS_ERR(tl))
return PTR_ERR(tl);
if (!tl->has_initial_breadcrumb)
goto out_free;
err = selftest_tl_pin(tl);
if (err)
goto out_free;
for_each_engine(engine, gt, id) {
const u32 *hwsp_seqno[2];
struct i915_request *rq;
u32 seqno[2];
if (!intel_engine_can_store_dword(engine))
continue;
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
tl->seqno = -4u;
mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
}
pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
seqno[0], tl->hwsp_offset);
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
if (err) {
i915_request_add(rq);
goto out;
}
hwsp_seqno[0] = tl->hwsp_seqno;
mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
mutex_unlock(&tl->mutex);
if (err) {
i915_request_add(rq);
goto out;
}
pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
seqno[1], tl->hwsp_offset);
err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
if (err) {
i915_request_add(rq);
goto out;
}
hwsp_seqno[1] = tl->hwsp_seqno;
/* With wrap should come a new hwsp */
GEM_BUG_ON(seqno[1] >= seqno[0]);
GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
err = -EIO;
goto out;
}
if (READ_ONCE(*hwsp_seqno[0]) != seqno[0] ||
READ_ONCE(*hwsp_seqno[1]) != seqno[1]) {
pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
*hwsp_seqno[0], *hwsp_seqno[1],
seqno[0], seqno[1]);
err = -EINVAL;
goto out;
}
intel_gt_retire_requests(gt); /* recycle HWSP */
}
out:
if (igt_flush_test(gt->i915))
err = -EIO;
intel_timeline_unpin(tl);
out_free:
intel_timeline_put(tl);
return err;
}
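/*
* Record the expected @seqno and then copy the live HWSP value (read via
* a CS GPR) into the watcher buffer at *addr, so the pair can be compared
* once the watcher request completes.
*/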
static int emit_read_hwsp(struct i915_request *rq,
u32 seqno, u32 hwsp,
u32 *addr)
{
const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
u32 *cs;
cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = *addr;
*cs++ = 0;
*cs++ = seqno;
*addr += 4;
*cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = gpr;
*cs++ = hwsp;
*cs++ = 0;
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = gpr;
*cs++ = *addr;
*cs++ = 0;
*addr += 4;
intel_ring_advance(rq, cs);
return 0;
}
struct hwsp_watcher {
struct i915_vma *vma;
struct i915_request *rq;
u32 addr;
u32 *map;
};
static bool cmp_lt(u32 a, u32 b)
{
return a < b;
}
static bool cmp_gte(u32 a, u32 b)
{
return a >= b;
}
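/*
* Allocate a 2MiB buffer for the watcher, mapped with the same caching as
* the timeline's HWSP, and pin it into the GGTT for CS access.
*/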
static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt,
struct intel_timeline *tl)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(gt->i915, SZ_2M);
if (IS_ERR(obj))
return PTR_ERR(obj);
/* keep the same cache settings as timeline */
i915_gem_object_set_pat_index(obj, tl->hwsp_ggtt->obj->pat_index);
w->map = i915_gem_object_pin_map_unlocked(obj,
page_unmask_bits(tl->hwsp_ggtt->obj->mm.mapping));
if (IS_ERR(w->map)) {
i915_gem_object_put(obj);
return PTR_ERR(w->map);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return PTR_ERR(vma);
}
w->vma = vma;
w->addr = i915_ggtt_offset(vma);
return 0;
}
static void switch_tl_lock(struct i915_request *from, struct i915_request *to)
{
/* some light mutex juggling required; think co-routines */
if (from) {
lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie);
mutex_unlock(&from->context->timeline->mutex);
}
if (to) {
mutex_lock(&to->context->timeline->mutex);
to->cookie = lockdep_pin_lock(&to->context->timeline->mutex);
}
}
static int create_watcher(struct hwsp_watcher *w,
struct intel_engine_cs *engine,
int ringsz)
{
struct intel_context *ce;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
ce->ring_size = ringsz;
w->rq = intel_context_create_request(ce);
intel_context_put(ce);
if (IS_ERR(w->rq))
return PTR_ERR(w->rq);
w->addr = i915_ggtt_offset(w->vma);
switch_tl_lock(w->rq, NULL);
return 0;
}
static int check_watcher(struct hwsp_watcher *w, const char *name,
bool (*op)(u32 hwsp, u32 seqno))
{
struct i915_request *rq = fetch_and_zero(&w->rq);
u32 offset, end;
int err;
GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size);
i915_request_get(rq);
switch_tl_lock(NULL, rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ) < 0) {
err = -ETIME;
goto out;
}
err = 0;
offset = 0;
end = (w->addr - i915_ggtt_offset(w->vma)) / sizeof(*w->map);
while (offset < end) {
if (!op(w->map[offset + 1], w->map[offset])) {
pr_err("Watcher '%s' found HWSP value %x for seqno %x\n",
name, w->map[offset + 1], w->map[offset]);
err = -EINVAL;
}
offset += 2;
}
out:
i915_request_put(rq);
return err;
}
static void cleanup_watcher(struct hwsp_watcher *w)
{
if (w->rq) {
switch_tl_lock(NULL, w->rq);
i915_request_add(w->rq);
}
i915_vma_unpin_and_release(&w->vma, I915_VMA_RELEASE_MAP);
}
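/* Retire completed requests on @tl; return true if the timeline is now idle. */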
static bool retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
mutex_lock(&tl->mutex);
list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
break;
mutex_unlock(&tl->mutex);
return !i915_active_fence_isset(&tl->last_request);
}
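/*
* Keep submitting requests on @rq's context until the timeline seqno wraps
* past the original request's seqno, then return a fresh request issued
* after the wrap.
*/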
static struct i915_request *wrap_timeline(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
struct intel_timeline *tl = ce->timeline;
u32 seqno = rq->fence.seqno;
while (tl->seqno >= seqno) { /* Cause a wrap */
i915_request_put(rq);
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
i915_request_get(rq);
i915_request_add(rq);
}
i915_request_put(rq);
rq = i915_request_create(ce);
if (IS_ERR(rq))
return rq;
i915_request_get(rq);
i915_request_add(rq);
return rq;
}
static int live_hwsp_read(void *arg)
{
struct intel_gt *gt = arg;
struct hwsp_watcher watcher[2] = {};
struct intel_engine_cs *engine;
struct intel_timeline *tl;
enum intel_engine_id id;
int err = 0;
int i;
/*
* If we take a reference to the HWSP for reading on the GPU, that
* read may be arbitrarily delayed (either by foreign fence or
* priority saturation) and a wrap can happen within 30 minutes.
* When the GPU read is finally submitted it should be correct,
* even across multiple wraps.
*/
if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */
return 0;
tl = intel_timeline_create(gt);
if (IS_ERR(tl))
return PTR_ERR(tl);
if (!tl->has_initial_breadcrumb)
goto out_free;
selftest_tl_pin(tl);
for (i = 0; i < ARRAY_SIZE(watcher); i++) {
err = setup_watcher(&watcher[i], gt, tl);
if (err)
goto out;
}
for_each_engine(engine, gt, id) {
struct intel_context *ce;
unsigned long count = 0;
IGT_TIMEOUT(end_time);
/* Create a request we can use for remote reading of the HWSP */
err = create_watcher(&watcher[1], engine, SZ_512K);
if (err)
goto out;
do {
struct i915_sw_fence *submit;
struct i915_request *rq;
u32 hwsp, dummy;
submit = heap_fence_create(GFP_KERNEL);
if (!submit) {
err = -ENOMEM;
goto out;
}
err = create_watcher(&watcher[0], engine, SZ_4K);
if (err)
goto out;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
}
ce->timeline = intel_timeline_get(tl);
/* Ensure timeline is mapped, done during first pin */
err = intel_context_pin(ce);
if (err) {
intel_context_put(ce);
goto out;
}
/*
* Start at a new wrap, and set seqno right before another wrap,
* saving 30 minutes of nops
*/
tl->seqno = -12u + 2 * (count & 3);
__intel_timeline_get_seqno(tl, &dummy);
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
err = i915_sw_fence_await_dma_fence(&rq->submit,
&watcher[0].rq->fence, 0,
GFP_KERNEL);
if (err < 0) {
i915_request_add(rq);
intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
switch_tl_lock(rq, watcher[0].rq);
err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
if (err == 0)
err = emit_read_hwsp(watcher[0].rq, /* before */
rq->fence.seqno, hwsp,
&watcher[0].addr);
switch_tl_lock(watcher[0].rq, rq);
if (err) {
i915_request_add(rq);
intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
switch_tl_lock(rq, watcher[1].rq);
err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
if (err == 0)
err = emit_read_hwsp(watcher[1].rq, /* after */
rq->fence.seqno, hwsp,
&watcher[1].addr);
switch_tl_lock(watcher[1].rq, rq);
if (err) {
i915_request_add(rq);
intel_context_unpin(ce);
intel_context_put(ce);
goto out;
}
i915_request_get(rq);
i915_request_add(rq);
rq = wrap_timeline(rq);
intel_context_unpin(ce);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out;
}
err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit,
&rq->fence, 0,
GFP_KERNEL);
if (err < 0) {
i915_request_put(rq);
goto out;
}
err = check_watcher(&watcher[0], "before", cmp_lt);
i915_sw_fence_commit(submit);
heap_fence_put(submit);
if (err) {
i915_request_put(rq);
goto out;
}
count++;
/* Flush the timeline before manually wrapping again */
if (i915_request_wait(rq,
I915_WAIT_INTERRUPTIBLE,
HZ) < 0) {
err = -ETIME;
i915_request_put(rq);
goto out;
}
retire_requests(tl);
i915_request_put(rq);
/* Single requests are limited to half a ring at most */
if (8 * watcher[1].rq->ring->emit >
3 * watcher[1].rq->ring->size)
break;
} while (!__igt_timeout(end_time, NULL) &&
count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2);
pr_info("%s: simulated %lu wraps\n", engine->name, count);
err = check_watcher(&watcher[1], "after", cmp_gte);
if (err)
goto out;
}
out:
for (i = 0; i < ARRAY_SIZE(watcher); i++)
cleanup_watcher(&watcher[i]);
intel_timeline_unpin(tl);
if (igt_flush_test(gt->i915))
err = -EIO;
out_free:
intel_timeline_put(tl);
return err;
}
static int live_hwsp_rollover_kernel(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* Run the host for long enough, and even the kernel context will
* see a seqno rollover.
*/
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct intel_timeline *tl = ce->timeline;
struct i915_request *rq[3] = {};
int i;
st_engine_heartbeat_disable(engine);
if (intel_gt_wait_for_idle(gt, HZ / 2)) {
err = -EIO;
goto out;
}
GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
tl->seqno = -2u;
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) {
struct i915_request *this;
this = i915_request_create(ce);
if (IS_ERR(this)) {
err = PTR_ERR(this);
goto out;
}
pr_debug("%s: create fence.seqnp:%d\n",
engine->name,
lower_32_bits(this->fence.seqno));
GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
rq[i] = i915_request_get(this);
i915_request_add(this);
}
/* We expected a wrap! */
GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
pr_err("Wait for timeline wrap timed out!\n");
err = -EIO;
goto out;
}
for (i = 0; i < ARRAY_SIZE(rq); i++) {
if (!i915_request_completed(rq[i])) {
pr_err("Pre-wrap request not completed!\n");
err = -EINVAL;
goto out;
}
}
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
st_engine_heartbeat_enable(engine);
if (err)
break;
}
if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
static int live_hwsp_rollover_user(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* Simulate a long running user context, and force the seqno wrap
* on the user's timeline.
*/
for_each_engine(engine, gt, id) {
struct i915_request *rq[3] = {};
struct intel_timeline *tl;
struct intel_context *ce;
int i;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
err = intel_context_alloc_state(ce);
if (err)
goto out;
tl = ce->timeline;
if (!tl->has_initial_breadcrumb)
goto out;
err = intel_context_pin(ce);
if (err)
goto out;
tl->seqno = -4u;
WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
for (i = 0; i < ARRAY_SIZE(rq); i++) {
struct i915_request *this;
this = intel_context_create_request(ce);
if (IS_ERR(this)) {
err = PTR_ERR(this);
goto out_unpin;
}
pr_debug("%s: create fence.seqnp:%d\n",
engine->name,
lower_32_bits(this->fence.seqno));
GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
rq[i] = i915_request_get(this);
i915_request_add(this);
}
/* We expected a wrap! */
GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
pr_err("Wait for timeline wrap timed out!\n");
err = -EIO;
goto out_unpin;
}
for (i = 0; i < ARRAY_SIZE(rq); i++) {
if (!i915_request_completed(rq[i])) {
pr_err("Pre-wrap request not completed!\n");
err = -EINVAL;
goto out_unpin;
}
}
out_unpin:
intel_context_unpin(ce);
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
intel_context_put(ce);
if (err)
break;
}
if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
static int live_hwsp_recycle(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long count;
int err = 0;
/*
* Check seqno writes into one timeline at a time. We expect to
* recycle the breadcrumb slot between iterations, confusing
* neither ourselves nor the GPU.
*/
count = 0;
for_each_engine(engine, gt, id) {
IGT_TIMEOUT(end_time);
if (!intel_engine_can_store_dword(engine))
continue;
intel_engine_pm_get(engine);
do {
struct intel_timeline *tl;
struct i915_request *rq;
tl = intel_timeline_create(gt);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
break;
}
rq = checked_tl_write(tl, engine, count);
if (IS_ERR(rq)) {
intel_timeline_put(tl);
err = PTR_ERR(rq);
break;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
i915_request_put(rq);
intel_timeline_put(tl);
err = -EIO;
break;
}
if (READ_ONCE(*tl->hwsp_seqno) != count) {
GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x found 0x%x\n",
count, tl->fence_context,
tl->hwsp_offset, *tl->hwsp_seqno);
GEM_TRACE_DUMP();
err = -EINVAL;
}
i915_request_put(rq);
intel_timeline_put(tl);
count++;
if (err)
break;
} while (!__igt_timeout(end_time, NULL));
intel_engine_pm_put(engine);
if (err)
break;
}
return err;
}
int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_hwsp_recycle),
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
SUBTEST(live_hwsp_wrap),
SUBTEST(live_hwsp_read),
SUBTEST(live_hwsp_rollover_kernel),
SUBTEST(live_hwsp_rollover_user),
};
if (intel_gt_is_wedged(to_gt(i915)))
return 0;
return intel_gt_live_subtests(tests, to_gt(i915));
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_timeline.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include <drm/drm_device.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/printk.h>
#include <linux/sysfs.h>
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
#include "intel_gt_print.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
#include "intel_gt_types.h"
#include "intel_rc6.h"
bool is_object_gt(struct kobject *kobj)
{
return !strncmp(kobj->name, "gt", 2);
}
struct intel_gt *intel_gt_sysfs_get_drvdata(struct kobject *kobj,
const char *name)
{
/*
* We need to know where the interface has been called from,
* whether from gt/ or from the parent directory, because the
* type of the private data depends on that position: if the
* interface is called from gt/ the private data is a
* "struct intel_gt *", otherwise it is a
* "struct drm_i915_private *".
*/
if (!is_object_gt(kobj)) {
struct device *dev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
return to_gt(i915);
}
return kobj_to_gt(kobj);
}
static struct kobject *gt_get_parent_obj(struct intel_gt *gt)
{
return >->i915->drm.primary->kdev->kobj;
}
static ssize_t id_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
return sysfs_emit(buf, "%u\n", gt->info.id);
}
static struct kobj_attribute attr_id = __ATTR_RO(id);
static struct attribute *id_attrs[] = {
&attr_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(id);
/* A kobject needs a release() method even if it does nothing */
static void kobj_gt_release(struct kobject *kobj)
{
}
static const struct kobj_type kobj_gt_type = {
.release = kobj_gt_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = id_groups,
};
void intel_gt_sysfs_register(struct intel_gt *gt)
{
/*
* To preserve ABI compatibility, the legacy files are still
* generated under the parent directory, where they were
* originally created.
*
* We generate those files only for gt 0 to avoid duplicates.
*/
if (gt_is_root(gt))
intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
/* init and xfer ownership to sysfs tree */
if (kobject_init_and_add(>->sysfs_gt, &kobj_gt_type,
gt->i915->sysfs_gt, "gt%d", gt->info.id))
goto exit_fail;
gt->sysfs_defaults = kobject_create_and_add(".defaults", >->sysfs_gt);
if (!gt->sysfs_defaults)
goto exit_fail;
intel_gt_sysfs_pm_init(gt, >->sysfs_gt);
return;
exit_fail:
kobject_put(>->sysfs_gt);
gt_warn(gt, "failed to initialize sysfs root\n");
}
void intel_gt_sysfs_unregister(struct intel_gt *gt)
{
kobject_put(gt->sysfs_defaults);
kobject_put(>->sysfs_gt);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_sysfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <drm/drm_print.h>
#include "i915_drv.h" /* for_each_engine! */
#include "intel_engine.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
static int engines_show(struct seq_file *m, void *data)
{
struct intel_gt *gt = m->private;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct drm_printer p;
p = drm_seq_file_printer(m);
for_each_engine(engine, gt, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(engines);
void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "engines", &engines_fops },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/slab.h>
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
{
struct i915_page_table *pt;
pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
pt->base = vm->alloc_pt_dma(vm, sz);
if (IS_ERR(pt->base)) {
kfree(pt);
return ERR_PTR(-ENOMEM);
}
pt->is_compact = false;
atomic_set(&pt->used, 0);
return pt;
}
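/*
* Allocate a page directory shell with room for @count entry pointers;
* the backing page is allocated separately by the caller.
*/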
struct i915_page_directory *__alloc_pd(int count)
{
struct i915_page_directory *pd;
pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return NULL;
pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
if (unlikely(!pd->entry)) {
kfree(pd);
return NULL;
}
spin_lock_init(&pd->lock);
return pd;
}
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
pd = __alloc_pd(I915_PDES);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
if (IS_ERR(pd->pt.base)) {
kfree(pd->entry);
kfree(pd);
return ERR_PTR(-ENOMEM);
}
return pd;
}
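/*
* Release a page table, or a page directory (lvl > 0) together with its
* entry array, and drop the backing DMA page if one was allocated.
*/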
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));
if (lvl) {
struct i915_page_directory *pd =
container_of(pt, typeof(*pd), pt);
kfree(pd->entry);
}
if (pt->base)
i915_gem_object_put(pt->base);
kfree(pt);
}
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
{
u64 * const vaddr = __px_vaddr(pdma);
vaddr[idx] = encoded_entry;
drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
}
void
__set_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
struct i915_page_table * const to,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
/* Each thread pre-pins the pd, and we may have a thread per pde. */
GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);
atomic_inc(px_used(pd));
pd->entry[idx] = to;
write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}
void
clear_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
const struct drm_i915_gem_object * const scratch)
{
GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
write_dma_entry(px_base(pd), idx, scratch->encode);
pd->entry[idx] = NULL;
atomic_dec(px_used(pd));
}
bool
release_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
struct i915_page_table * const pt,
const struct drm_i915_gem_object * const scratch)
{
bool free = false;
if (atomic_add_unless(&pt->used, -1, 1))
return false;
spin_lock(&pd->lock);
if (atomic_dec_and_test(&pt->used)) {
clear_pd_entry(pd, idx, scratch);
free = true;
}
spin_unlock(&pd->lock);
return free;
}
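/* Apply GTT workarounds and, on gen6/gen7, enable PPGTT in hardware. */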
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
gtt_write_workarounds(gt);
if (GRAPHICS_VER(i915) == 6)
gen6_ppgtt_enable(gt);
else if (GRAPHICS_VER(i915) == 7)
gen7_ppgtt_enable(gt);
return 0;
}
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
if (GRAPHICS_VER(gt->i915) < 8)
return gen6_ppgtt_create(gt);
else
return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
struct i915_ppgtt *ppgtt;
ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
if (IS_ERR(ppgtt))
return ppgtt;
trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
u32 pte_flags;
if (!vma_res->allocated) {
vm->allocate_va_range(vm, stash, vma_res->start,
vma_res->vma_size);
vma_res->allocated = true;
}
/* Applicable to VLV, and gen8+ */
pte_flags = 0;
if (vma_res->bi.readonly)
pte_flags |= PTE_READ_ONLY;
if (vma_res->bi.lmem)
pte_flags |= PTE_LM;
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
wmb();
}
void ppgtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
if (!vma_res->allocated)
return;
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
vma_invalidate_tlb(vm, vma_res->tlb);
}
static unsigned long pd_count(u64 size, int shift)
{
/* Beware later misalignment */
return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
u64 size)
{
unsigned long count;
int shift, n, pt_sz;
shift = vm->pd_shift;
if (!shift)
return 0;
pt_sz = stash->pt_sz;
if (!pt_sz)
pt_sz = I915_GTT_PAGE_SIZE_4K;
else
GEM_BUG_ON(!IS_DGFX(vm->i915));
GEM_BUG_ON(!is_power_of_2(pt_sz));
count = pd_count(size, shift);
while (count--) {
struct i915_page_table *pt;
pt = alloc_pt(vm, pt_sz);
if (IS_ERR(pt)) {
i915_vm_free_pt_stash(vm, stash);
return PTR_ERR(pt);
}
pt->stash = stash->pt[0];
stash->pt[0] = pt;
}
for (n = 1; n < vm->top; n++) {
shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
count = pd_count(size, shift);
while (count--) {
struct i915_page_directory *pd;
pd = alloc_pd(vm);
if (IS_ERR(pd)) {
i915_vm_free_pt_stash(vm, stash);
return PTR_ERR(pd);
}
pd->pt.stash = stash->pt[1];
stash->pt[1] = &pd->pt;
}
}
return 0;
}
int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash)
{
struct i915_page_table *pt;
int n, err;
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
err = map_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
}
return 0;
}
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash)
{
struct i915_page_table *pt;
int n;
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
while ((pt = stash->pt[n])) {
stash->pt[n] = pt->stash;
free_px(vm, pt, n);
}
}
}
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
struct drm_i915_private *i915 = gt->i915;
ppgtt->vm.gt = gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
dma_resv_init(&ppgtt->vm._resv);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
}
| linux-master | drivers/gpu/drm/i915/gt/intel_ppgtt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
static int request_sync(struct i915_request *rq)
{
struct intel_timeline *tl = i915_request_timeline(rq);
long timeout;
int err = 0;
intel_timeline_get(tl);
i915_request_get(rq);
/* Opencode i915_request_add() so we can keep the timeline locked. */
__i915_request_commit(rq);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
__i915_request_queue_bh(rq);
timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
err = timeout;
else
i915_request_retire_upto(rq);
lockdep_unpin_lock(&tl->mutex, rq->cookie);
mutex_unlock(&tl->mutex);
i915_request_put(rq);
intel_timeline_put(tl);
return err;
}
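/* Wait for and retire every request outstanding on the context's timeline. */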
static int context_sync(struct intel_context *ce)
{
struct intel_timeline *tl = ce->timeline;
int err = 0;
mutex_lock(&tl->mutex);
do {
struct i915_request *rq;
long timeout;
if (list_empty(&tl->requests))
break;
rq = list_last_entry(&tl->requests, typeof(*rq), link);
i915_request_get(rq);
timeout = i915_request_wait(rq, 0, HZ / 10);
if (timeout < 0)
err = timeout;
else
i915_request_retire_upto(rq);
i915_request_put(rq);
} while (!err);
mutex_unlock(&tl->mutex);
/* Wait for all barriers to complete (remote CPU) before we check */
i915_active_unlock_wait(&ce->active);
return err;
}
static int __live_context_size(struct intel_engine_cs *engine)
{
struct intel_context *ce;
struct i915_request *rq;
void *vaddr;
int err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
err = intel_context_pin(ce);
if (err)
goto err;
vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
intel_gt_coherent_map_type(engine->gt,
ce->state->obj,
false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
goto err;
}
/*
* Note that execlists also applies a redzone which it checks on
* context unpin when debugging. We are using the same location
* and same poison value so that our checks overlap. Despite the
* redundancy, we want to keep this little selftest so that we
* get coverage of any and all submission backends, and we can
* always extend this test to ensure we trick the HW into a
* compromising position wrt to the various sections that need
* to be written into the context state.
*
* TLDR; this overlaps with the execlists redzone.
*/
vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
rq = intel_context_create_request(ce);
intel_context_unpin(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
err = request_sync(rq);
if (err)
goto err_unpin;
/* Force the context switch */
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
err = request_sync(rq);
if (err)
goto err_unpin;
if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
pr_err("%s context overwrote trailing red-zone!", engine->name);
err = -EINVAL;
}
err_unpin:
i915_gem_object_unpin_map(ce->state->obj);
err:
intel_context_put(ce);
return err;
}
static int live_context_size(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* Check that our context sizes are correct by seeing if the
* HW tries to write past the end of one.
*/
for_each_engine(engine, gt, id) {
struct file *saved;
if (!engine->context_size)
continue;
intel_engine_pm_get(engine);
/*
* Hide the old default state -- we lie about the context size
* and get confused when the default state is smaller than
* expected. For our do nothing request, inheriting the
* active state is sufficient, we are only checking that we
* don't use more than we planned.
*/
saved = fetch_and_zero(&engine->default_state);
/* Overlaps with the execlists redzone */
engine->context_size += I915_GTT_PAGE_SIZE;
err = __live_context_size(engine);
engine->context_size -= I915_GTT_PAGE_SIZE;
engine->default_state = saved;
intel_engine_pm_put(engine);
if (err)
break;
}
return err;
}
static int __live_active_context(struct intel_engine_cs *engine)
{
unsigned long saved_heartbeat;
struct intel_context *ce;
int pass;
int err;
/*
* We keep active contexts alive until after a subsequent context
* switch as the final write from the context-save will be after
* we retire the final request. We track when we unpin the context,
* under the presumption that the final pin is from the last request,
* and instead of immediately unpinning the context, we add a task
* to unpin the context from the next idle-barrier.
*
* This test makes sure that the context is kept alive until a
* subsequent idle-barrier (emitted when the engine wakeref hits 0
* with no more outstanding requests).
*
* In GuC submission mode we don't use idle barriers and we instead
* get a message from the GuC to signal that it is safe to unpin the
* context from memory.
*/
if (intel_engine_uses_guc(engine))
return 0;
if (intel_engine_pm_is_awake(engine)) {
pr_err("%s is awake before starting %s!\n",
engine->name, __func__);
return -EINVAL;
}
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
saved_heartbeat = engine->props.heartbeat_interval_ms;
engine->props.heartbeat_interval_ms = 0;
for (pass = 0; pass <= 2; pass++) {
struct i915_request *rq;
intel_engine_pm_get(engine);
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_engine;
}
err = request_sync(rq);
if (err)
goto out_engine;
/* Context will be kept active until after an idle-barrier. */
if (i915_active_is_idle(&ce->active)) {
pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
engine->name, pass);
err = -EINVAL;
goto out_engine;
}
if (!intel_engine_pm_is_awake(engine)) {
pr_err("%s is asleep before idle-barrier\n",
engine->name);
err = -EINVAL;
goto out_engine;
}
out_engine:
intel_engine_pm_put(engine);
if (err)
goto err;
}
/* Now make sure our idle-barriers are flushed */
err = intel_engine_flush_barriers(engine);
if (err)
goto err;
/* Wait for the barrier and in the process wait for engine to park */
err = context_sync(engine->kernel_context);
if (err)
goto err;
if (!i915_active_is_idle(&ce->active)) {
pr_err("context is still active!");
err = -EINVAL;
}
intel_engine_pm_flush(engine);
if (intel_engine_pm_is_awake(engine)) {
struct drm_printer p = drm_debug_printer(__func__);
intel_engine_dump(engine, &p,
"%s is still awake:%d after idle-barriers\n",
engine->name,
atomic_read(&engine->wakeref.count));
GEM_TRACE_DUMP();
err = -EINVAL;
goto err;
}
err:
engine->props.heartbeat_interval_ms = saved_heartbeat;
intel_context_put(ce);
return err;
}
static int live_active_context(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
for_each_engine(engine, gt, id) {
err = __live_active_context(engine);
if (err)
break;
err = igt_flush_test(gt->i915);
if (err)
break;
}
return err;
}
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
struct i915_request *rq;
int err;
err = intel_context_pin(remote);
if (err)
return err;
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin;
}
err = intel_context_prepare_remote_request(remote, rq);
if (err) {
i915_request_add(rq);
goto unpin;
}
err = request_sync(rq);
unpin:
intel_context_unpin(remote);
return err;
}
static int __live_remote_context(struct intel_engine_cs *engine)
{
struct intel_context *local, *remote;
unsigned long saved_heartbeat;
int pass;
int err;
/*
* Check that our idle barriers do not interfere with normal
* activity tracking. In particular, check that operating
* on the context image remotely (intel_context_prepare_remote_request),
* which inserts foreign fences into intel_context.active, does not
* clobber the idle-barrier.
*
* In GuC submission mode we don't use idle barriers.
*/
if (intel_engine_uses_guc(engine))
return 0;
if (intel_engine_pm_is_awake(engine)) {
pr_err("%s is awake before starting %s!\n",
engine->name, __func__);
return -EINVAL;
}
remote = intel_context_create(engine);
if (IS_ERR(remote))
return PTR_ERR(remote);
local = intel_context_create(engine);
if (IS_ERR(local)) {
err = PTR_ERR(local);
goto err_remote;
}
saved_heartbeat = engine->props.heartbeat_interval_ms;
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
for (pass = 0; pass <= 2; pass++) {
err = __remote_sync(local, remote);
if (err)
break;
err = __remote_sync(engine->kernel_context, remote);
if (err)
break;
if (i915_active_is_idle(&remote->active)) {
pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
engine->name, pass);
err = -EINVAL;
break;
}
}
intel_engine_pm_put(engine);
engine->props.heartbeat_interval_ms = saved_heartbeat;
intel_context_put(local);
err_remote:
intel_context_put(remote);
return err;
}
static int live_remote_context(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
for_each_engine(engine, gt, id) {
err = __live_remote_context(engine);
if (err)
break;
err = igt_flush_test(gt->i915);
if (err)
break;
}
return err;
}
int intel_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_context_size),
SUBTEST(live_active_context),
SUBTEST(live_remote_context),
};
struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
return intel_gt_live_subtests(tests, gt);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_context.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#define NUM_STEPS 5
#define H2G_DELAY 50000
#define delay_for_h2g() usleep_range(H2G_DELAY, H2G_DELAY + 10000)
#define FREQUENCY_REQ_UNIT DIV_ROUND_CLOSEST(GT_FREQUENCY_MULTIPLIER, \
GEN9_FREQ_SCALER)
enum test_type {
VARY_MIN,
VARY_MAX,
MAX_GRANTED,
SLPC_POWER,
TILE_INTERACTION,
};
struct slpc_thread {
struct kthread_worker *worker;
struct kthread_work work;
struct intel_gt *gt;
int result;
};
static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
int ret;
ret = intel_guc_slpc_set_min_freq(slpc, freq);
if (ret)
pr_err("Could not set min frequency to [%u]\n", freq);
else /* Delay to ensure h2g completes */
delay_for_h2g();
return ret;
}
static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
{
int ret;
ret = intel_guc_slpc_set_max_freq(slpc, freq);
if (ret)
pr_err("Could not set maximum frequency [%u]\n",
freq);
else /* Delay to ensure h2g completes */
delay_for_h2g();
return ret;
}
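/* Pin both the SLPC max and min limits to @freq (max updated first, then min). */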
static int slpc_set_freq(struct intel_gt *gt, u32 freq)
{
int err;
struct intel_guc_slpc *slpc = >->uc.guc.slpc;
err = slpc_set_max_freq(slpc, freq);
if (err) {
pr_err("Unable to update max freq");
return err;
}
err = slpc_set_min_freq(slpc, freq);
if (err) {
pr_err("Unable to update min freq");
return err;
}
return err;
}
static int slpc_restore_freq(struct intel_guc_slpc *slpc, u32 min, u32 max)
{
int err;
err = slpc_set_max_freq(slpc, max);
if (err) {
pr_err("Unable to restore max freq");
return err;
}
err = slpc_set_min_freq(slpc, min);
if (err) {
pr_err("Unable to restore min freq");
return err;
}
err = intel_guc_slpc_set_ignore_eff_freq(slpc, false);
if (err) {
pr_err("Unable to restore efficient freq");
return err;
}
return 0;
}
static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
{
int err = 0;
err = slpc_set_freq(gt, *freq);
if (err)
return err;
*freq = intel_rps_read_actual_frequency(>->rps);
*power = measure_power(>->rps, freq);
return err;
}
static int vary_max_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
u32 *max_act_freq)
{
u32 step, max_freq, req_freq;
u32 act_freq;
int err = 0;
/* Go from max to min in 5 steps */
step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
*max_act_freq = slpc->min_freq;
for (max_freq = slpc->rp0_freq; max_freq > slpc->min_freq;
max_freq -= step) {
err = slpc_set_max_freq(slpc, max_freq);
if (err)
break;
req_freq = intel_rps_read_punit_req_frequency(rps);
/* GuC requests freq in multiples of 50/3 MHz */
if (req_freq > (max_freq + FREQUENCY_REQ_UNIT)) {
pr_err("SWReq is %d, should be at most %d\n", req_freq,
max_freq + FREQUENCY_REQ_UNIT);
err = -EINVAL;
}
act_freq = intel_rps_read_actual_frequency(rps);
if (act_freq > *max_act_freq)
*max_act_freq = act_freq;
if (err)
break;
}
return err;
}
static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
u32 *max_act_freq)
{
u32 step, min_freq, req_freq;
u32 act_freq;
int err = 0;
/* Go from min to max in 5 steps */
step = (slpc->rp0_freq - slpc->min_freq) / NUM_STEPS;
*max_act_freq = slpc->min_freq;
for (min_freq = slpc->min_freq; min_freq < slpc->rp0_freq;
min_freq += step) {
err = slpc_set_min_freq(slpc, min_freq);
if (err)
break;
req_freq = intel_rps_read_punit_req_frequency(rps);
/* GuC requests freq in multiples of 50/3 MHz */
if (req_freq < (min_freq - FREQUENCY_REQ_UNIT)) {
pr_err("SWReq is %d, should be at least %d\n", req_freq,
min_freq - FREQUENCY_REQ_UNIT);
err = -EINVAL;
}
act_freq = intel_rps_read_actual_frequency(rps);
if (act_freq > *max_act_freq)
*max_act_freq = act_freq;
if (err)
break;
}
return err;
}
static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
{
struct intel_guc_slpc *slpc = >->uc.guc.slpc;
struct {
u64 power;
int freq;
} min, max;
int err = 0;
/*
* Our fundamental assumption is that running at lower frequency
* actually saves power. Let's see if our RAPL measurement supports
* that theory.
*/
if (!librapl_supported(gt->i915))
return 0;
min.freq = slpc->min_freq;
err = measure_power_at_freq(gt, &min.freq, &min.power);
if (err)
return err;
max.freq = slpc->rp0_freq;
err = measure_power_at_freq(gt, &max.freq, &max.power);
if (err)
return err;
pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
engine->name,
min.power, min.freq,
max.power, max.freq);
if (10 * min.freq >= 9 * max.freq) {
pr_notice("Could not control frequency, ran at [%uMHz, %uMhz]\n",
min.freq, max.freq);
}
if (11 * min.power > 10 * max.power) {
pr_err("%s: did not conserve power when setting lower frequency!\n",
engine->name);
err = -EINVAL;
}
/* Restore min/max frequencies */
slpc_set_max_freq(slpc, slpc->rp0_freq);
slpc_set_min_freq(slpc, slpc->min_freq);
return err;
}
static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, u32 *max_act_freq)
{
struct intel_gt *gt = rps_to_gt(rps);
u32 perf_limit_reasons;
int err = 0;
err = slpc_set_min_freq(slpc, slpc->rp0_freq);
if (err)
return err;
*max_act_freq = intel_rps_read_actual_frequency(rps);
if (*max_act_freq != slpc->rp0_freq) {
/* Check if there was some throttling by pcode */
perf_limit_reasons = intel_uncore_read(gt->uncore,
intel_gt_perf_limit_reasons_reg(gt));
/* If not, this is an error */
if (!(perf_limit_reasons & GT0_PERF_LIMIT_REASONS_MASK)) {
pr_err("Pcode did not grant max freq\n");
err = -EINVAL;
} else {
pr_info("Pcode throttled frequency 0x%x\n", perf_limit_reasons);
}
}
return err;
}
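/*
* Core SLPC exercise: keep each engine busy with a spinner while sweeping
* or pinning the SLPC min/max frequency according to @test_type, then
* check that the requested/actual frequency (or measured power) responds
* as expected before restoring the original limits.
*/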
static int run_test(struct intel_gt *gt, int test_type)
{
struct intel_guc_slpc *slpc = >->uc.guc.slpc;
struct intel_rps *rps = >->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
u32 slpc_min_freq, slpc_max_freq;
int err = 0;
if (!intel_uc_uses_guc_slpc(>->uc))
return 0;
if (slpc->min_freq == slpc->rp0_freq) {
pr_err("Min/Max are fused to the same value\n");
return -EINVAL;
}
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
if (intel_guc_slpc_get_max_freq(slpc, &slpc_max_freq)) {
pr_err("Could not get SLPC max freq\n");
return -EIO;
}
if (intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq)) {
pr_err("Could not get SLPC min freq\n");
return -EIO;
}
/*
* Set min frequency to RPn so that we can test the whole
* range of RPn-RP0.
*/
err = slpc_set_min_freq(slpc, slpc->min_freq);
if (err) {
pr_err("Unable to update min freq!");
return err;
}
/*
* Turn off efficient frequency so RPn/RP0 ranges are obeyed.
*/
err = intel_guc_slpc_set_ignore_eff_freq(slpc, true);
if (err) {
pr_err("Unable to turn off efficient freq!");
return err;
}
intel_gt_pm_wait_for_idle(gt);
intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
u32 max_act_freq;
if (!intel_engine_can_store_dword(engine))
continue;
st_engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
st_engine_heartbeat_enable(engine);
break;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {
pr_err("%s: Spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
st_engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
}
switch (test_type) {
case VARY_MIN:
err = vary_min_freq(slpc, rps, &max_act_freq);
break;
case VARY_MAX:
err = vary_max_freq(slpc, rps, &max_act_freq);
break;
case MAX_GRANTED:
case TILE_INTERACTION:
/* Media engines have a different RP0 */
if (gt->type != GT_MEDIA && (engine->class == VIDEO_DECODE_CLASS ||
engine->class == VIDEO_ENHANCEMENT_CLASS)) {
igt_spinner_end(&spin);
st_engine_heartbeat_enable(engine);
err = 0;
continue;
}
err = max_granted_freq(slpc, rps, &max_act_freq);
break;
case SLPC_POWER:
err = slpc_power(gt, engine);
break;
}
if (test_type != SLPC_POWER) {
pr_info("Max actual frequency for %s was %d\n",
engine->name, max_act_freq);
/* Actual frequency should rise above min */
if (max_act_freq <= slpc->min_freq) {
pr_err("Actual freq did not rise above min\n");
pr_err("Perf Limit Reasons: 0x%x\n",
intel_uncore_read(gt->uncore,
intel_gt_perf_limit_reasons_reg(gt)));
err = -EINVAL;
}
}
igt_spinner_end(&spin);
st_engine_heartbeat_enable(engine);
if (err)
break;
}
/* Restore min/max/efficient frequencies */
err = slpc_restore_freq(slpc, slpc_min_freq, slpc_max_freq);
if (igt_flush_test(gt->i915))
err = -EIO;
intel_gt_pm_put(gt);
igt_spinner_fini(&spin);
intel_gt_pm_wait_for_idle(gt);
return err;
}
static int live_slpc_vary_min(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_gt *gt;
unsigned int i;
int ret = 0;
for_each_gt(gt, i915, i) {
ret = run_test(gt, VARY_MIN);
if (ret)
return ret;
}
return ret;
}
static int live_slpc_vary_max(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_gt *gt;
unsigned int i;
int ret = 0;
for_each_gt(gt, i915, i) {
ret = run_test(gt, VARY_MAX);
if (ret)
return ret;
}
return ret;
}
/* check if pcode can grant RP0 */
static int live_slpc_max_granted(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_gt *gt;
unsigned int i;
int ret = 0;
for_each_gt(gt, i915, i) {
ret = run_test(gt, MAX_GRANTED);
if (ret)
return ret;
}
return ret;
}
static int live_slpc_power(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_gt *gt;
unsigned int i;
int ret = 0;
for_each_gt(gt, i915, i) {
ret = run_test(gt, SLPC_POWER);
if (ret)
return ret;
}
return ret;
}
static void slpc_spinner_thread(struct kthread_work *work)
{
struct slpc_thread *thread = container_of(work, typeof(*thread), work);
thread->result = run_test(thread->gt, TILE_INTERACTION);
}
static int live_slpc_tile_interaction(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_gt *gt;
struct slpc_thread *threads;
int i = 0, ret = 0;
threads = kcalloc(I915_MAX_GT, sizeof(*threads), GFP_KERNEL);
if (!threads)
return -ENOMEM;
for_each_gt(gt, i915, i) {
threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
if (IS_ERR(threads[i].worker)) {
ret = PTR_ERR(threads[i].worker);
break;
}
threads[i].gt = gt;
kthread_init_work(&threads[i].work, slpc_spinner_thread);
kthread_queue_work(threads[i].worker, &threads[i].work);
}
for_each_gt(gt, i915, i) {
int status;
if (IS_ERR_OR_NULL(threads[i].worker))
continue;
kthread_flush_work(&threads[i].work);
status = READ_ONCE(threads[i].result);
if (status && !ret) {
pr_err("%s GT %d failed ", __func__, gt->info.id);
ret = status;
}
kthread_destroy_worker(threads[i].worker);
}
kfree(threads);
return ret;
}
int intel_slpc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_slpc_vary_max),
SUBTEST(live_slpc_vary_min),
SUBTEST(live_slpc_max_granted),
SUBTEST(live_slpc_power),
SUBTEST(live_slpc_tile_interaction),
};
struct intel_gt *gt;
unsigned int i;
for_each_gt(gt, i915, i) {
if (intel_gt_is_wedged(gt))
return 0;
}
return i915_live_subtests(tests, i915);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_slpc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <asm/tsc.h>
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
struct ia_constants {
unsigned int min_gpu_freq;
unsigned int max_gpu_freq;
unsigned int min_ring_freq;
unsigned int max_ia_freq;
};
static struct intel_gt *llc_to_gt(struct intel_llc *llc)
{
return container_of(llc, struct intel_gt, llc);
}
static unsigned int cpu_max_MHz(void)
{
struct cpufreq_policy *policy;
unsigned int max_khz;
policy = cpufreq_cpu_get(0);
if (policy) {
max_khz = policy->cpuinfo.max_freq;
cpufreq_cpu_put(policy);
} else {
/*
* Default to measured freq if none found, PCU will ensure we
* don't go over
*/
max_khz = tsc_khz;
}
return max_khz / 1000;
}
static bool get_ia_constants(struct intel_llc *llc,
struct ia_constants *consts)
{
struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct intel_rps *rps = &llc_to_gt(llc)->rps;
if (!HAS_LLC(i915) || IS_DGFX(i915))
return false;
consts->max_ia_freq = cpu_max_MHz();
consts->min_ring_freq =
intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf;
/* convert DDR frequency from units of 266.6MHz to bandwidth */
consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
return true;
}
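/*
* Work out the IA and ring frequencies to pair with @gpu_freq when
* populating the PCU's minimum-frequency table, following the per-gen
* scaling rules below.
*/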
static void calc_ia_freq(struct intel_llc *llc,
unsigned int gpu_freq,
const struct ia_constants *consts,
unsigned int *out_ia_freq,
unsigned int *out_ring_freq)
{
struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
const int diff = consts->max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
if (GRAPHICS_VER(i915) >= 9) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
*/
ring_freq = gpu_freq;
} else if (GRAPHICS_VER(i915) >= 8) {
/* max(2 * GT, DDR). NB: GT is 50MHz units */
ring_freq = max(consts->min_ring_freq, gpu_freq);
} else if (IS_HASWELL(i915)) {
ring_freq = mult_frac(gpu_freq, 5, 4);
ring_freq = max(consts->min_ring_freq, ring_freq);
/* leave ia_freq as the default, chosen by cpufreq */
} else {
const int min_freq = 15;
const int scale = 180;
/*
* On older processors, there is no separate ring
* clock domain, so in order to boost the bandwidth
* of the ring, we need to upclock the CPU (ia_freq).
*
* For GPU frequencies less than 750MHz,
* just use the lowest ring freq.
*/
if (gpu_freq < min_freq)
ia_freq = 800;
else
ia_freq = consts->max_ia_freq - diff * scale / 2;
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
}
*out_ia_freq = ia_freq;
*out_ring_freq = ring_freq;
}
static void gen6_update_ring_freq(struct intel_llc *llc)
{
struct ia_constants consts;
unsigned int gpu_freq;
if (!get_ia_constants(llc, &consts))
return;
/*
* Although this is unlikely on any platform during initialization,
* let's ensure we don't accidentally get stuck in an infinite loop.
*/
if (consts.max_gpu_freq <= consts.min_gpu_freq)
return;
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = consts.max_gpu_freq;
gpu_freq >= consts.min_gpu_freq;
gpu_freq--) {
unsigned int ia_freq, ring_freq;
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
snb_pcode_write(llc_to_gt(llc)->uncore, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
gpu_freq);
}
}
void intel_llc_enable(struct intel_llc *llc)
{
gen6_update_ring_freq(llc);
}
void intel_llc_disable(struct intel_llc *llc)
{
/* Currently there is no HW configuration to be done to disable. */
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_llc.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_llc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2008-2021 Intel Corporation
*/
#include <drm/drm_cache.h>
#include "gem/i915_gem_internal.h"
#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_mitigations.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
/*
* Rough estimate of the typical request size: performing a flush,
* set-context and then emitting the batch.
*/
#define LEGACY_REQUEST_SIZE 200
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
/*
* Keep the render interrupt unmasked as this papers over
* lost interrupts following a reset.
*/
if (engine->class == RENDER_CLASS) {
if (GRAPHICS_VER(engine->i915) >= 6)
mask &= ~BIT(0);
else
mask &= ~I915_USER_INTERRUPT;
}
intel_engine_set_hwsp_writemask(engine, mask);
}
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
u32 addr;
addr = lower_32_bits(phys);
if (GRAPHICS_VER(engine->i915) >= 4)
addr |= (phys >> 28) & 0xf0;
intel_uncore_write(engine->uncore, HWS_PGA, addr);
}
static struct page *status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
return sg_page(obj->mm.pages->sgl);
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
set_hwstam(engine, ~0u);
}
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
i915_reg_t hwsp;
/*
* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
if (GRAPHICS_VER(engine->i915) == 7) {
switch (engine->id) {
/*
* No more rings exist on Gen7. Default case is only to shut up
* gcc switch check warning.
*/
default:
GEM_BUG_ON(engine->id);
fallthrough;
case RCS0:
hwsp = RENDER_HWS_PGA_GEN7;
break;
case BCS0:
hwsp = BLT_HWS_PGA_GEN7;
break;
case VCS0:
hwsp = BSD_HWS_PGA_GEN7;
break;
case VECS0:
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
} else if (GRAPHICS_VER(engine->i915) == 6) {
hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
hwsp = RING_HWS_PGA(engine->mmio_base);
}
intel_uncore_write_fw(engine->uncore, hwsp, offset);
intel_uncore_posting_read_fw(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
return;
/* ring should be idle before issuing a sync flush*/
if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
engine->name);
ENGINE_WRITE_FW(engine, RING_INSTPM,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
INSTPM_SYNC_FLUSH));
if (__intel_wait_for_register_fw(engine->uncore,
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
2000, 0, NULL))
ENGINE_TRACE(engine,
"wait for SyncFlush to complete for TLB invalidation timed out\n");
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
{
set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
set_hwstam(engine, ~0u);
flush_cs_tlb(engine);
}
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
if (i915_is_ggtt(vm))
vm = &i915_vm_to_ggtt(vm)->alias->vm;
return vm;
}
static u32 pp_dir(struct i915_address_space *vm)
{
return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}
static void set_pp_dir(struct intel_engine_cs *engine)
{
struct i915_address_space *vm = vm_alias(engine->gt->vm);
if (!vm)
return;
ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
if (GRAPHICS_VER(engine->i915) >= 7) {
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
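/* Drain and disable the ring; return true if HEAD was successfully reset to zero. */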
static bool stop_ring(struct intel_engine_cs *engine)
{
/* Empty the ring by skipping to the end */
ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
ENGINE_POSTING_READ(engine, RING_HEAD);
/* The ring must be empty before it is disabled */
ENGINE_WRITE_FW(engine, RING_CTL, 0);
ENGINE_POSTING_READ(engine, RING_CTL);
/* Then reset the disabled ring */
ENGINE_WRITE_FW(engine, RING_HEAD, 0);
ENGINE_WRITE_FW(engine, RING_TAIL, 0);
return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
static int xcs_resume(struct intel_engine_cs *engine)
{
struct intel_ring *ring = engine->legacy.ring;
ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
ring->head, ring->tail);
/*
* Double check the ring is empty & disabled before we resume. Called
* from atomic context during PCI probe, so _hardirq().
*/
intel_synchronize_hardirq(engine->i915);
if (!stop_ring(engine))
goto err;
if (HWS_NEEDS_PHYSICAL(engine->i915))
ring_setup_phys_status_page(engine);
else
ring_setup_status_page(engine);
intel_breadcrumbs_reset(engine->breadcrumbs);
/* Enforce ordering by reading HEAD register back */
ENGINE_POSTING_READ(engine, RING_HEAD);
/*
* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values.
*/
ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
intel_ring_update_space(ring);
set_pp_dir(engine);
/* First wake the ring up to an empty/idle ring */
ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
ENGINE_POSTING_READ(engine, RING_TAIL);
ENGINE_WRITE_FW(engine, RING_CTL,
RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
if (__intel_wait_for_register_fw(engine->uncore,
RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
5000, 0, NULL))
goto err;
if (GRAPHICS_VER(engine->i915) > 2)
ENGINE_WRITE_FW(engine,
RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_signal_breadcrumbs(engine);
return 0;
err:
drm_err(&engine->i915->drm,
"%s initialization failed; "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_CTL) & RING_VALID,
ENGINE_READ(engine, RING_HEAD), ring->head,
ENGINE_READ(engine, RING_TAIL), ring->tail,
ENGINE_READ(engine, RING_START),
i915_ggtt_offset(ring->vma));
return -EIO;
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
{
struct intel_timeline *tl;
list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
intel_timeline_reset_seqno(tl);
}
static void xcs_sanitize(struct intel_engine_cs *engine)
{
/*
* Poison residual state on resume, in case the suspend didn't!
*
* We have to assume that across suspend/resume (or other loss
* of control) the contents of our pinned buffers have been
* lost, replaced by garbage. Since this doesn't always happen,
* let's poison such state so that we more quickly spot when
* we falsely assume it has been preserved.
*/
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
/*
* The kernel_context HWSP is stored in the status_page. As above,
* that may be lost on resume/initialisation, and so we need to
* reset the value in the HWSP.
*/
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
intel_engine_reset_pinned_contexts(engine);
}
static void reset_prepare(struct intel_engine_cs *engine)
{
/*
* We stop the engines, otherwise we might get a failed reset and a
* dead gpu (on elk). Even a gpu as modern as kbl can suffer from a
* system hang if a batchbuffer is progressing when the reset is
* issued, regardless of the READY_TO_RESET ack.
* Thus assume it is best to stop the engines on all gens
* where we have a gpu reset.
*
* WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
*
* WaMediaResetMainRingCleanup:ctg,elk (presumably)
* WaClearRingBufHeadRegAtInit:ctg,elk
*
* FIXME: Wa for more modern gens needs to be validated
*/
ENGINE_TRACE(engine, "\n");
intel_engine_stop_cs(engine);
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
ENGINE_TRACE(engine,
"HEAD not reset to zero, "
"{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
ENGINE_READ_FW(engine, RING_CTL),
ENGINE_READ_FW(engine, RING_HEAD),
ENGINE_READ_FW(engine, RING_TAIL),
ENGINE_READ_FW(engine, RING_START));
if (!stop_ring(engine)) {
drm_err(&engine->i915->drm,
"failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
ENGINE_READ_FW(engine, RING_CTL),
ENGINE_READ_FW(engine, RING_HEAD),
ENGINE_READ_FW(engine, RING_TAIL),
ENGINE_READ_FW(engine, RING_START));
}
}
}
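/*
 * After a reset, rewind the software HEAD of the legacy ring to the first
 * incomplete request so execution resumes from there; if everything
 * submitted had already completed, fold HEAD back onto TAIL.
 */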
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
struct i915_request *pos, *rq;
unsigned long flags;
u32 head;
rq = NULL;
spin_lock_irqsave(&engine->sched_engine->lock, flags);
rcu_read_lock();
list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
if (!__i915_request_is_complete(pos)) {
rq = pos;
break;
}
}
rcu_read_unlock();
/*
* The guilty request will get skipped on a hung engine.
*
* Users of client default contexts do not rely on logical
* state preserved between batches so it is safe to execute
* queued requests following the hang. Non default contexts
* rely on preserved state, so skipping a batch loses the
* evolution of the state and it needs to be considered corrupted.
* Executing more queued batches on top of corrupted state is
* risky. But we take the risk by trying to advance through
* the queued requests in order to make the client behaviour
* more predictable around resets, by not throwing away a random
* amount of batches it has prepared for execution. Sophisticated
* clients can use gem_reset_stats_ioctl and dma fence status
* (exported via the sync_file info ioctl on explicit fences) to observe
* when they lose the context state and should rebuild accordingly.
*
* The context ban, and ultimately the client ban, mechanism are safety
* valves if client submission ends up resulting in nothing more than
* subsequent hangs.
*/
if (rq) {
/*
* Try to restore the logical GPU state to match the
* continuation of the request queue. If we skip the
* context/PD restore, then the next request may try to execute
* assuming that its context is valid and loaded on the GPU and
* so may try to access invalid memory, prompting repeated GPU
* hangs.
*
* If the request was guilty, we still restore the logical
* state in case the next request requires it (e.g. the
* aliasing ppgtt), but skip over the hung batch.
*
* If the request was innocent, we try to replay the request
* with the restored context.
*/
__i915_request_reset(rq, stalled);
GEM_BUG_ON(rq->ring != engine->legacy.ring);
head = rq->head;
} else {
head = engine->legacy.ring->tail;
}
engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
{
}
static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->sched_engine->lock, flags);
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
i915_request_put(i915_request_mark_eio(request));
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
{
i915_request_submit(request);
wmb(); /* paranoid flush writes out of the WCB before mmio */
ENGINE_WRITE(request->engine, RING_TAIL,
intel_ring_set_tail(request->ring, request->tail));
}
static void __ring_context_fini(struct intel_context *ce)
{
i915_vma_put(ce->state);
}
static void ring_context_destroy(struct kref *ref)
{
struct intel_context *ce = container_of(ref, typeof(*ce), ref);
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
__ring_context_fini(ce);
intel_context_fini(ce);
intel_context_free(ce);
}
static int ring_context_init_default_state(struct intel_context *ce,
struct i915_gem_ww_ctx *ww)
{
struct drm_i915_gem_object *obj = ce->state->obj;
void *vaddr;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
shmem_read(ce->engine->default_state, 0,
vaddr, ce->engine->context_size);
i915_gem_object_flush_map(obj);
__i915_gem_object_release_map(obj);
__set_bit(CONTEXT_VALID_BIT, &ce->flags);
return 0;
}
static int ring_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **unused)
{
struct i915_address_space *vm;
int err = 0;
if (ce->engine->default_state &&
!test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
err = ring_context_init_default_state(ce, ww);
if (err)
return err;
}
vm = vm_alias(ce->vm);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
return err;
}
static void __context_unpin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
vm = vm_alias(ce->vm);
if (vm)
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
static void ring_context_unpin(struct intel_context *ce)
{
}
static void ring_context_post_unpin(struct intel_context *ce)
{
__context_unpin_ppgtt(ce);
}
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
obj = i915_gem_object_create_shmem(i915, engine->context_size);
if (IS_ERR(obj))
return ERR_CAST(obj);
/*
* Try to make the context utilize L3 as well as LLC.
*
* On VLV we don't have L3 controls in the PTEs so we
* shouldn't touch the cache level, especially as that
* would make the object snooped which might have a
* negative performance impact.
*
* Snooping is required on non-llc platforms in execlist
* mode, but since all GGTT accesses use PAT entry 0 we
* get snooping anyway regardless of cache_level.
*
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
if (IS_IVYBRIDGE(i915))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
return vma;
err_obj:
i915_gem_object_put(obj);
return ERR_PTR(err);
}
static int ring_context_alloc(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
/* One ringbuffer to rule them all */
GEM_BUG_ON(!engine->legacy.ring);
ce->ring = engine->legacy.ring;
ce->timeline = intel_timeline_get(engine->legacy.timeline);
GEM_BUG_ON(ce->state);
if (engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
if (IS_ERR(vma))
return PTR_ERR(vma);
ce->state = vma;
}
return 0;
}
static int ring_context_pin(struct intel_context *ce, void *unused)
{
return 0;
}
static void ring_context_reset(struct intel_context *ce)
{
intel_ring_reset(ce->ring, ce->ring->emit);
clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
static void ring_context_revoke(struct intel_context *ce,
struct i915_request *rq,
unsigned int preempt_timeout_ms)
{
struct intel_engine_cs *engine;
if (!rq || !i915_request_is_active(rq))
return;
engine = rq->engine;
lockdep_assert_held(&engine->sched_engine->lock);
list_for_each_entry_continue(rq, &engine->sched_engine->requests,
sched.link)
if (rq->context == ce) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
}
}
static void ring_context_cancel_request(struct intel_context *ce,
struct i915_request *rq)
{
struct intel_engine_cs *engine = NULL;
i915_request_active_engine(rq, &engine);
if (engine && intel_engine_pulse(engine))
intel_gt_handle_error(engine->gt, engine->mask, 0,
"request cancellation by %s",
current->comm);
}
static const struct intel_context_ops ring_context_ops = {
.alloc = ring_context_alloc,
.cancel_request = ring_context_cancel_request,
.revoke = ring_context_revoke,
.pre_pin = ring_context_pre_pin,
.pin = ring_context_pin,
.unpin = ring_context_unpin,
.post_unpin = ring_context_post_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
.reset = ring_context_reset,
.destroy = ring_context_destroy,
};
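/*
 * Emit the commands that point the engine at a new page directory: write
 * PP_DIR_DCLV and PP_DIR_BASE, store PP_DIR_BASE back to scratch (to
 * serialise the load), then force a TLB invalidate via INSTPM and flush.
 */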
static int load_pd_dir(struct i915_request *rq,
struct i915_address_space *vm,
u32 valid)
{
const struct intel_engine_cs * const engine = rq->engine;
u32 *cs;
cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
*cs++ = valid;
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = pp_dir(vm);
/* Stall until the page table load is complete? */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
intel_ring_advance(rq, cs);
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
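/*
 * Emit an MI_SET_CONTEXT sequence, bracketed by the per-generation
 * workarounds: arbitration off/on around the switch on gen7 (plus PSMI
 * sleep-message toggling on Haswell), MI_SUSPEND_FLUSH on ilk, and a
 * dummy switch to the kernel context when a restore must be forced.
 */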
static int mi_set_context(struct i915_request *rq,
struct intel_context *ce,
u32 flags)
{
struct intel_engine_cs *engine = rq->engine;
struct drm_i915_private *i915 = engine->i915;
enum intel_engine_id id;
const int num_engines =
IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
len = 4;
if (GRAPHICS_VER(i915) == 7)
len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
else if (GRAPHICS_VER(i915) == 5)
len += 2;
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
flags &= ~MI_FORCE_RESTORE;
force_restore = true;
len += 2;
}
cs = intel_ring_begin(rq, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (GRAPHICS_VER(i915) == 7) {
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_engines) {
struct intel_engine_cs *signaller;
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
*cs++ = i915_mmio_reg_offset(
RING_PSMI_CTL(signaller->mmio_base));
*cs++ = _MASKED_BIT_ENABLE(
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
} else if (GRAPHICS_VER(i915) == 5) {
/*
* This w/a is only listed for pre-production ilk a/b steppings,
* but is also mentioned for programming the powerctx. To be
* safe, just apply the workaround; we do not use SyncFlush so
* this should never take effect and so be a no-op!
*/
*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
}
if (force_restore) {
/*
* The HW doesn't handle being told to restore the current
* context very well. Quite often it likes to go off and sulk,
* especially when it is meant to be reloading PP_DIR.
* A very simple fix to force the reload is to simply switch
* away from the current context and back again.
*
* Note that the kernel_context will contain random state
* following the INHIBIT_RESTORE. We accept this since we
* never use the kernel_context state; it is merely a
* placeholder we use to flush other contexts.
*/
*cs++ = MI_SET_CONTEXT;
*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
MI_MM_SPACE_GTT |
MI_RESTORE_INHIBIT;
}
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
*cs++ = i915_ggtt_offset(ce->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
*cs++ = MI_NOOP;
if (GRAPHICS_VER(i915) == 7) {
if (num_engines) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, engine->gt, id) {
if (signaller == engine)
continue;
last_reg = RING_PSMI_CTL(signaller->mmio_base);
*cs++ = i915_mmio_reg_offset(last_reg);
*cs++ = _MASKED_BIT_DISABLE(
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
*cs++ = intel_gt_scratch_offset(engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
} else if (GRAPHICS_VER(i915) == 5) {
*cs++ = MI_SUSPEND_FLUSH;
}
intel_ring_advance(rq, cs);
return 0;
}
static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
return 0;
cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
for (i = 0; i < L3LOG_DW; i++) {
*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
*cs++ = remap_info[i];
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
#undef L3LOG_DW
}
static int remap_l3(struct i915_request *rq)
{
struct i915_gem_context *ctx = i915_request_gem_context(rq);
int i, err;
if (!ctx || !ctx->remap_slice)
return 0;
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(ctx->remap_slice & BIT(i)))
continue;
err = remap_l3_slice(rq, i);
if (err)
return err;
}
ctx->remap_slice = 0;
return 0;
}
static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
int ret;
if (!vm)
return 0;
ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
if (ret)
return ret;
/*
* Not only do we need a full barrier (post-sync write) after
* invalidating the TLBs, but we need to wait a little bit
* longer. Whether this is merely delaying us, or the
* subsequent flush is a key part of serialising with the
* post-sync op, this extra pass appears vital before a
* mm switch!
*/
ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
if (ret)
return ret;
return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
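/*
 * Scrub engine state between users: switch to the kernel context and run
 * the gen7 wa_ctx batch that clears the GPR residuals, finishing with a
 * flush and invalidate so the next switch_mm() starts clean.
 */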
static int clear_residuals(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
int ret;
ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
if (ret)
return ret;
if (engine->kernel_context->state) {
ret = mi_set_context(rq,
engine->kernel_context,
MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
if (ret)
return ret;
}
ret = engine->emit_bb_start(rq,
i915_vma_offset(engine->wa_ctx.vma), 0,
0);
if (ret)
return ret;
ret = engine->emit_flush(rq, EMIT_FLUSH);
if (ret)
return ret;
/* Always invalidate before the next switch_mm() */
return engine->emit_flush(rq, EMIT_INVALIDATE);
}
static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct intel_context *ce = rq->context;
void **residuals = NULL;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
if (engine->wa_ctx.vma->private != ce &&
i915_mitigate_clear_residuals()) {
ret = clear_residuals(rq);
if (ret)
return ret;
residuals = &engine->wa_ctx.vma->private;
}
}
ret = switch_mm(rq, vm_alias(ce->vm));
if (ret)
return ret;
if (ce->state) {
u32 flags;
GEM_BUG_ON(engine->id != RCS0);
/* For resource streamer on HSW+ and power context elsewhere */
BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
flags |= MI_RESTORE_EXT_STATE_EN;
else
flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, ce, flags);
if (ret)
return ret;
}
ret = remap_l3(rq);
if (ret)
return ret;
/*
* Now past the point of no return, this request _will_ be emitted.
*
* Or at least this preamble will be emitted, the request may be
* interrupted prior to submitting the user payload. If so, we
* still submit the "empty" request in order to preserve global
* state tracking such as this, our tracking of the current
* dirty context.
*/
if (residuals) {
intel_context_put(*residuals);
*residuals = intel_context_get(ce);
}
return 0;
}
static int ring_request_alloc(struct i915_request *request)
{
int ret;
GEM_BUG_ON(!intel_context_is_pinned(request->context));
GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
/*
* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
/* Unconditionally invalidate GPU caches and TLBs. */
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
ret = switch_context(request);
if (ret)
return ret;
request->reserved_space -= LEGACY_REQUEST_SIZE;
return 0;
}
static void gen6_bsd_submit_request(struct i915_request *request)
{
struct intel_uncore *uncore = request->engine->uncore;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
if (__intel_wait_for_register_fw(uncore,
RING_PSMI_CTL(GEN6_BSD_RING_BASE),
GEN6_BSD_SLEEP_INDICATOR,
0,
1000, 0, NULL))
drm_err(&uncore->i915->drm,
"timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
i9xx_submit_request(request);
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i9xx_submit_request;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = gen6_bsd_submit_request;
}
static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
drm_WARN_ON(&i915->drm, GRAPHICS_VER(i915) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
if (engine->wa_ctx.vma) {
intel_context_put(engine->wa_ctx.vma->private);
i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
}
intel_ring_unpin(engine->legacy.ring);
intel_ring_put(engine->legacy.ring);
intel_timeline_unpin(engine->legacy.timeline);
intel_timeline_put(engine->legacy.timeline);
}
static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
intel_engine_signal_breadcrumbs(engine);
}
static void setup_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
intel_engine_set_irq_handler(engine, irq_handler);
if (GRAPHICS_VER(i915) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
} else if (GRAPHICS_VER(i915) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
} else if (GRAPHICS_VER(i915) >= 3) {
engine->irq_enable = gen3_irq_enable;
engine->irq_disable = gen3_irq_disable;
} else {
engine->irq_enable = gen2_irq_enable;
engine->irq_disable = gen2_irq_disable;
}
}
static void add_to_engine(struct i915_request *rq)
{
lockdep_assert_held(&rq->engine->sched_engine->lock);
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}
static void remove_from_engine(struct i915_request *rq)
{
spin_lock_irq(&rq->engine->sched_engine->lock);
list_del_init(&rq->sched.link);
/* Prevent further __await_execution() registering a cb, then flush */
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
spin_unlock_irq(&rq->engine->sched_engine->lock);
i915_request_notify_execute_cb_imm(rq);
}
static void setup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
/* gen8+ are only supported with execlists */
GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);
setup_irq(engine);
engine->resume = xcs_resume;
engine->sanitize = xcs_sanitize;
engine->reset.prepare = reset_prepare;
engine->reset.rewind = reset_rewind;
engine->reset.cancel = reset_cancel;
engine->reset.finish = reset_finish;
engine->add_active_request = add_to_engine;
engine->remove_active_request = remove_from_engine;
engine->cops = &ring_context_ops;
engine->request_alloc = ring_request_alloc;
/*
* Using a global execution timeline; the previous final breadcrumb is
* equivalent to our next initial breadcrumb, so we can elide
* engine->emit_init_breadcrumb().
*/
engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
if (GRAPHICS_VER(i915) == 5)
engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
engine->set_default_submission = i9xx_set_default_submission;
if (GRAPHICS_VER(i915) >= 6)
engine->emit_bb_start = gen6_emit_bb_start;
else if (GRAPHICS_VER(i915) >= 4)
engine->emit_bb_start = gen4_emit_bb_start;
else if (IS_I830(i915) || IS_I845G(i915))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = gen3_emit_bb_start;
}
static void setup_rcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
if (HAS_L3_DPF(i915))
engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (GRAPHICS_VER(i915) >= 7) {
engine->emit_flush = gen7_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
} else if (GRAPHICS_VER(i915) == 6) {
engine->emit_flush = gen6_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
} else if (GRAPHICS_VER(i915) == 5) {
engine->emit_flush = gen4_emit_flush_rcs;
} else {
if (GRAPHICS_VER(i915) < 4)
engine->emit_flush = gen2_emit_flush;
else
engine->emit_flush = gen4_emit_flush_rcs;
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
if (IS_HASWELL(i915))
engine->emit_bb_start = hsw_emit_bb_start;
}
static void setup_vcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
if (GRAPHICS_VER(i915) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
if (GRAPHICS_VER(i915) == 6)
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_emit_flush_vcs;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
if (GRAPHICS_VER(i915) == 6)
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
} else {
engine->emit_flush = gen4_emit_flush_vcs;
if (GRAPHICS_VER(i915) == 5)
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
}
}
static void setup_bcs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
if (GRAPHICS_VER(i915) == 6)
engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
else
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static void setup_vecs(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
GEM_BUG_ON(GRAPHICS_VER(i915) < 7);
engine->emit_flush = gen6_emit_flush_xcs;
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
engine->irq_enable = hsw_irq_enable_vecs;
engine->irq_disable = hsw_irq_disable_vecs;
engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
struct i915_vma * const vma)
{
return gen7_setup_clear_gpr_bb(engine, vma);
}
static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
struct i915_gem_ww_ctx *ww,
struct i915_vma *vma)
{
int err;
err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
if (err)
return err;
err = i915_vma_sync(vma);
if (err)
goto err_unpin;
err = gen7_ctx_switch_bb_setup(engine, vma);
if (err)
goto err_unpin;
engine->wa_ctx.vma = vma;
return 0;
err_unpin:
i915_vma_unpin(vma);
return err;
}
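/*
 * Allocate the buffer that will hold the gen7 context-switch workaround
 * batch. gen7_ctx_switch_bb_setup() is first called with a NULL vma
 * purely to probe the batch size; a zero return means no batch is needed
 * on this engine.
 */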
static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int size, err;
if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
return NULL;
err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
if (err < 0)
return ERR_PTR(err);
if (!err)
return NULL;
size = ALIGN(err, PAGE_SIZE);
obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, engine->gt->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return ERR_CAST(vma);
}
vma->private = intel_context_create(engine); /* dummy residuals */
if (IS_ERR(vma->private)) {
err = PTR_ERR(vma->private);
vma->private = NULL;
i915_gem_object_put(obj);
return ERR_PTR(err);
}
return vma;
}
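/*
 * One-time construction of a legacy ringbuffer engine: per-class vfunc
 * setup, a shared timeline and 16K ring, plus the optional gen7 wa batch,
 * all pinned under a single ww transaction with -EDEADLK backoff.
 */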
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
struct i915_gem_ww_ctx ww;
struct intel_timeline *timeline;
struct intel_ring *ring;
struct i915_vma *gen7_wa_vma;
int err;
setup_common(engine);
switch (engine->class) {
case RENDER_CLASS:
setup_rcs(engine);
break;
case VIDEO_DECODE_CLASS:
setup_vcs(engine);
break;
case COPY_ENGINE_CLASS:
setup_bcs(engine);
break;
case VIDEO_ENHANCEMENT_CLASS:
setup_vecs(engine);
break;
default:
MISSING_CASE(engine->class);
return -ENODEV;
}
timeline = intel_timeline_create_from_engine(engine,
I915_GEM_HWS_SEQNO_ADDR);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
}
GEM_BUG_ON(timeline->has_initial_breadcrumb);
ring = intel_engine_create_ring(engine, SZ_16K);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
goto err_timeline;
}
GEM_BUG_ON(engine->legacy.ring);
engine->legacy.ring = ring;
engine->legacy.timeline = timeline;
gen7_wa_vma = gen7_ctx_vma(engine);
if (IS_ERR(gen7_wa_vma)) {
err = PTR_ERR(gen7_wa_vma);
goto err_ring;
}
i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
if (!err && gen7_wa_vma)
err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
if (!err)
err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
if (!err)
err = intel_timeline_pin(timeline, &ww);
if (!err) {
err = intel_ring_pin(ring, &ww);
if (err)
intel_timeline_unpin(timeline);
}
if (err)
goto out;
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
if (gen7_wa_vma) {
err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
if (err) {
intel_ring_unpin(ring);
intel_timeline_unpin(timeline);
}
}
out:
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
if (err)
goto err_gen7_put;
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
err_gen7_put:
if (gen7_wa_vma) {
intel_context_put(gen7_wa_vma->private);
i915_gem_object_put(gen7_wa_vma->obj);
}
err_ring:
intel_ring_put(ring);
err_timeline:
intel_timeline_put(timeline);
err:
intel_engine_cleanup_common(engine);
return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_ring_submission.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/debugfs.h>
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_sseu_debugfs.h"
#include "uc/intel_uc_debugfs.h"
int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
{
int ret = intel_gt_terminally_wedged(gt);
switch (ret) {
case -EIO:
*val = 1;
return 0;
case 0:
*val = 0;
return 0;
default:
return ret;
}
}
void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
{
/* Flush any previous reset before applying for a new one */
wait_event(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF, >->reset.flags));
intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE,
"Manually reset engine mask to %llx", val);
}
/*
* keep the interface clean where the first parameter
* is a 'struct intel_gt *' instead of 'void *'
*/
static int __intel_gt_debugfs_reset_show(void *data, u64 *val)
{
return intel_gt_debugfs_reset_show(data, val);
}
static int __intel_gt_debugfs_reset_store(void *data, u64 val)
{
intel_gt_debugfs_reset_store(data, val);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show,
__intel_gt_debugfs_reset_store, "%llu\n");
static int steering_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
struct intel_gt *gt = m->private;
intel_gt_mcr_report_steering(&p, gt, true);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(steering);
static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "reset", &reset_fops, NULL },
{ "steering", &steering_fops },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
void intel_gt_debugfs_register(struct intel_gt *gt)
{
struct dentry *root;
char gtname[4];
if (!gt->i915->drm.primary->debugfs_root)
return;
snprintf(gtname, sizeof(gtname), "gt%u", gt->info.id);
root = debugfs_create_dir(gtname, gt->i915->drm.primary->debugfs_root);
if (IS_ERR(root))
return;
gt_debugfs_register(gt, root);
intel_gt_engines_debugfs_register(gt, root);
intel_gt_pm_debugfs_register(gt, root);
intel_sseu_debugfs_register(gt, root);
intel_uc_debugfs_register(>->uc, root);
}
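/*
 * Create one debugfs file per table entry, skipping entries whose eval()
 * callback rejects the private data; files with a write hook are created
 * 0644, read-only ones 0444.
 */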
void intel_gt_debugfs_register_files(struct dentry *root,
const struct intel_gt_debugfs_file *files,
unsigned long count, void *data)
{
while (count--) {
umode_t mode = files->fops->write ? 0644 : 0444;
if (!files->eval || files->eval(data))
debugfs_create_file(files->name,
mode, root, data,
files->fops);
files++;
}
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2018 Intel Corporation
*/
#include <linux/sort.h>
#include "gt/intel_gt_print.h"
#include "i915_selftest.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt_clock_utils.h"
#include "selftest_engine.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_spinner.h"
#define COUNT 5
static int cmp_u64(const void *A, const void *B)
{
const u64 *a = A, *b = B;
if (*a < *b)
return -1;
if (*a > *b)
return 1;
return 0;
}
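/*
 * Reduce COUNT samples to one value: sort them, discard the min and max,
 * and return a weighted average of the middle three,
 * (a[1] + 2 * a[2] + a[3]) / 4.
 */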
static u64 trifilter(u64 *a)
{
sort(a, COUNT, sizeof(*a), cmp_u64, NULL);
return (a[1] + 2 * a[2] + a[3]) >> 2;
}
static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value)
{
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
op;
*cs++ = value;
*cs++ = offset;
*cs++ = 0;
return cs;
}
static u32 *emit_store(u32 *cs, u32 offset, u32 value)
{
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = offset;
*cs++ = 0;
*cs++ = value;
return cs;
}
static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = i915_mmio_reg_offset(reg);
*cs++ = offset;
*cs++ = 0;
return cs;
}
static void write_semaphore(u32 *x, u32 value)
{
WRITE_ONCE(*x, value);
wmb();
}
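/*
 * Sample RING_TIMESTAMP and CTX_TIMESTAMP around a ~100us window that is
 * gated by semaphores in the status page, so the CPU controls exactly
 * when the GPU starts and stops the measured interval.
 */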
static int __measure_timestamps(struct intel_context *ce,
u64 *dt, u64 *d_ring, u64 *d_ctx)
{
struct intel_engine_cs *engine = ce->engine;
u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5);
u32 offset = i915_ggtt_offset(engine->status_page.vma);
struct i915_request *rq;
u32 *cs;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
cs = intel_ring_begin(rq, 28);
if (IS_ERR(cs)) {
i915_request_add(rq);
return PTR_ERR(cs);
}
/* Signal & wait for start */
cs = emit_store(cs, offset + 4008, 1);
cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1);
cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000);
cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004);
/* Busy wait */
cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1);
cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016);
cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012);
intel_ring_advance(rq, cs);
i915_request_get(rq);
i915_request_add(rq);
intel_engine_flush_submission(engine);
/* Wait for the request to start executing, that then waits for us */
while (READ_ONCE(sema[2]) == 0)
cpu_relax();
/* Run the request for ~100us, sampling timestamps before/after */
local_irq_disable();
write_semaphore(&sema[2], 0);
while (READ_ONCE(sema[1]) == 0) /* wait for the gpu to catch up */
cpu_relax();
*dt = local_clock();
udelay(100);
*dt = local_clock() - *dt;
write_semaphore(&sema[2], 1);
local_irq_enable();
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
i915_request_put(rq);
return -ETIME;
}
i915_request_put(rq);
pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n",
engine->name, sema[1], sema[3], sema[0], sema[4]);
*d_ctx = sema[3] - sema[1];
*d_ring = sema[4] - sema[0];
return 0;
}
static int __live_engine_timestamps(struct intel_engine_cs *engine)
{
u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt;
struct intel_context *ce;
int i, err = 0;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
for (i = 0; i < COUNT; i++) {
err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]);
if (err)
break;
}
intel_context_put(ce);
if (err)
return err;
dt = trifilter(st);
d_ring = trifilter(s_ring);
d_ctx = trifilter(s_ctx);
pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n",
engine->name, dt,
intel_gt_clock_interval_to_ns(engine->gt, d_ctx),
intel_gt_clock_interval_to_ns(engine->gt, d_ring));
d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring);
if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) {
pr_err("%s Mismatch between ring timestamp and walltime!\n",
engine->name);
return -EINVAL;
}
d_ring = trifilter(s_ring);
d_ctx = trifilter(s_ctx);
d_ctx *= engine->gt->clock_frequency;
if (GRAPHICS_VER(engine->i915) == 11)
d_ring *= 12500000; /* Fixed 80ns for GEN11 ctx timestamp? */
else
d_ring *= engine->gt->clock_frequency;
if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) {
pr_err("%s Mismatch between ring and context timestamps!\n",
engine->name);
return -EINVAL;
}
return 0;
}
static int live_engine_timestamps(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share
* the same CS clock.
*/
if (GRAPHICS_VER(gt->i915) < 8)
return 0;
for_each_engine(engine, gt, id) {
int err;
st_engine_heartbeat_disable(engine);
err = __live_engine_timestamps(engine);
st_engine_heartbeat_enable(engine);
if (err)
return err;
}
return 0;
}
static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness)
{
ktime_t start, unused, dt;
if (!intel_engine_uses_guc(engine))
return 0;
/*
* In GuC mode of submission, the busyness stats may get updated after
* the batch starts running. Poll for a change in busyness and time out
* after 10 ms.
*/
start = ktime_get();
while (intel_engine_get_busy_time(engine, &unused) == busyness) {
dt = ktime_get() - start;
if (dt > 10000000) {
pr_err("active wait timed out %lld\n", dt);
ENGINE_TRACE(engine, "active wait time out %lld\n", dt);
return -ETIME;
}
}
return 0;
}
static int live_engine_busy_stats(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
int err = 0;
/*
* Check that if an engine supports busy-stats, they tell the truth.
*/
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
for_each_engine(engine, gt, id) {
struct i915_request *rq;
ktime_t busyness, dummy;
ktime_t de, dt;
ktime_t t[2];
if (!intel_engine_supports_stats(engine))
continue;
if (!intel_engine_can_store_dword(engine))
continue;
if (intel_gt_pm_wait_for_idle(gt)) {
err = -EBUSY;
break;
}
st_engine_heartbeat_disable(engine);
ENGINE_TRACE(engine, "measuring idle time\n");
preempt_disable();
de = intel_engine_get_busy_time(engine, &t[0]);
udelay(100);
de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
preempt_enable();
dt = ktime_sub(t[1], t[0]);
if (de < 0 || de > 10) {
pr_err("%s: reported %lldns [%d%%] busyness while sleeping [for %lldns]\n",
engine->name,
de, (int)div64_u64(100 * de, dt), dt);
GEM_TRACE_DUMP();
err = -EINVAL;
goto end;
}
/* 100% busy */
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto end;
}
i915_request_add(rq);
busyness = intel_engine_get_busy_time(engine, &dummy);
if (!igt_wait_for_spinner(&spin, rq)) {
intel_gt_set_wedged(engine->gt);
err = -ETIME;
goto end;
}
err = __spin_until_busier(engine, busyness);
if (err) {
GEM_TRACE_DUMP();
goto end;
}
ENGINE_TRACE(engine, "measuring busy time\n");
preempt_disable();
de = intel_engine_get_busy_time(engine, &t[0]);
mdelay(100);
de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de);
preempt_enable();
dt = ktime_sub(t[1], t[0]);
if (100 * de < 95 * dt || 95 * de > 100 * dt) {
pr_err("%s: reported %lldns [%d%%] busyness while spinning [for %lldns]\n",
engine->name,
de, (int)div64_u64(100 * de, dt), dt);
GEM_TRACE_DUMP();
err = -EINVAL;
goto end;
}
end:
st_engine_heartbeat_enable(engine);
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
break;
}
igt_spinner_fini(&spin);
if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
static int live_engine_pm(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* Check we can call intel_engine_pm_put from any context. No
* failures are reported directly, but if we mess up lockdep should
* tell us.
*/
if (intel_gt_pm_wait_for_idle(gt)) {
pr_err("Unable to flush GT pm before test\n");
return -EBUSY;
}
GEM_BUG_ON(intel_gt_pm_is_awake(gt));
for_each_engine(engine, gt, id) {
const typeof(*igt_atomic_phases) *p;
for (p = igt_atomic_phases; p->name; p++) {
/*
* Acquisition is always synchronous, except if we
* know that the engine is already awake, in which
* case we should use intel_engine_pm_get_if_awake()
* to atomically grab the wakeref.
*
* In practice,
* intel_engine_pm_get();
* intel_engine_pm_put();
* occurs in one thread, while simultaneously
* intel_engine_pm_get_if_awake();
* intel_engine_pm_put();
* occurs from atomic context in another.
*/
GEM_BUG_ON(intel_engine_pm_is_awake(engine));
intel_engine_pm_get(engine);
p->critical_section_begin();
if (!intel_engine_pm_get_if_awake(engine))
pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
engine->name, p->name);
else
intel_engine_pm_put_async(engine);
intel_engine_pm_put_async(engine);
p->critical_section_end();
intel_engine_pm_flush(engine);
if (intel_engine_pm_is_awake(engine)) {
pr_err("%s is still awake after flushing pm\n",
engine->name);
return -EINVAL;
}
/* gt wakeref is async (deferred to workqueue) */
if (intel_gt_pm_wait_for_idle(gt)) {
gt_err(gt, "GT failed to idle\n");
return -EINVAL;
}
}
}
return 0;
}
int live_engine_pm_selftests(struct intel_gt *gt)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_engine_timestamps),
SUBTEST(live_engine_busy_stats),
SUBTEST(live_engine_pm),
};
return intel_gt_live_subtests(tests, gt);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_engine_pm.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/sched/clock.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_uncore.h"
#include "intel_rps.h"
#include "pxp/intel_pxp_irq.h"
#include "uc/intel_gsc_proxy.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
if (unlikely(!guc->interrupts.enabled))
return;
if (iir & GUC_INTR_GUC2HOST)
intel_guc_to_host_event_handler(guc);
}
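/*
 * Select the (bank, bit) interrupt source and spin (~100us) for the
 * identity register to report valid data, returning the raw identity
 * dword or 0 on timeout.
 */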
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
{
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 timeout_ts;
u32 ident;
lockdep_assert_held(gt->irq_lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
/*
* NB: Specs do not specify how long to spin wait,
* so we do ~100us as an educated guess.
*/
timeout_ts = (local_clock() >> 10) + 100;
do {
ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
} while (!(ident & GEN11_INTR_DATA_VALID) &&
!time_after32(local_clock() >> 10, timeout_ts));
if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
gt_err(gt, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
bank, bit, ident);
return 0;
}
raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
GEN11_INTR_DATA_VALID);
return ident;
}
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
const u16 iir)
{
struct intel_gt *media_gt = gt->i915->media_gt;
if (instance == OTHER_GUC_INSTANCE)
return guc_irq_handler(>->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
return guc_irq_handler(&media_gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(>->rps, iir);
if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
return gen11_rps_irq_handler(&media_gt->rps, iir);
if (instance == OTHER_KCR_INSTANCE)
return intel_pxp_irq_handler(gt->i915->pxp, iir);
if (instance == OTHER_GSC_INSTANCE)
return intel_gsc_irq_handler(gt, iir);
if (instance == OTHER_GSC_HECI_2_INSTANCE)
return intel_gsc_proxy_irq_handler(>->uc.gsc, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
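/*
 * Route the interrupt to the GT that owns the engine class/instance:
 * media and GSC sources belong to the standalone media GT when present.
 */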
static struct intel_gt *pick_gt(struct intel_gt *gt, u8 class, u8 instance)
{
struct intel_gt *media_gt = gt->i915->media_gt;
/* we expect the non-media gt to be passed in */
GEM_BUG_ON(gt == media_gt);
if (!media_gt)
return gt;
switch (class) {
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
return media_gt;
case OTHER_CLASS:
if (instance == OTHER_GSC_HECI_2_INSTANCE)
return media_gt;
if ((instance == OTHER_GSC_INSTANCE || instance == OTHER_KCR_INSTANCE) &&
HAS_ENGINE(media_gt, GSC0))
return media_gt;
fallthrough;
default:
return gt;
}
}
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
if (unlikely(!intr))
return;
/*
* Platforms with standalone media have the media and GSC engines in
* another GT.
*/
gt = pick_gt(gt, class, instance);
if (class <= MAX_ENGINE_CLASS && instance <= MAX_ENGINE_INSTANCE) {
struct intel_engine_cs *engine = gt->engine_class[class][instance];
if (engine)
return intel_engine_cs_irq(engine, intr);
}
if (class == OTHER_CLASS)
return gen11_other_irq_handler(gt, instance, intr);
WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
class, instance, intr);
}
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
void __iomem * const regs = intel_uncore_regs(gt->uncore);
unsigned long intr_dw;
unsigned int bit;
lockdep_assert_held(gt->irq_lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
for_each_set_bit(bit, &intr_dw, 32) {
const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
gen11_gt_identity_handler(gt, ident);
}
/* Clear must be after shared has been served for engine */
raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
unsigned int bank;
spin_lock(gt->irq_lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
gen11_gt_bank_handler(gt, bank);
}
spin_unlock(gt->irq_lock);
}
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
{
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 dw;
lockdep_assert_held(gt->irq_lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
/*
* According to the BSpec, DW_IIR bits cannot be cleared without
* first servicing the Selector & Shared IIR registers.
*/
gen11_gt_engine_identity(gt, bank, bit);
/*
* We locked GT INT DW by reading it. If we want to (try
* to) recover from this successfully, we need to clear
* our bit, otherwise we are locking the register for
* everybody.
*/
raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
return true;
}
return false;
}
void gen11_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
/* Disable RCS, BCS, VCS and VECS class engines. */
intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
if (CCS_MASK(gt))
intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);
if (HAS_HECI_GSC(gt->i915) || HAS_ENGINE(gt, GSC0))
intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0);
/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~0);
if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~0);
if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);
if (HAS_HECI_GSC(gt->i915) || HAS_ENGINE(gt, GSC0))
intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~0);
}
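/*
 * Enable and unmask the engine-class interrupts present on this GT, wire
 * up the GSC/HECI and GuC sources where applicable, and leave the RPS
 * interrupts masked until RPS itself is enabled.
 */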
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
u32 irqs = GT_RENDER_USER_INTERRUPT;
u32 guc_mask = intel_uc_wants_guc(>->uc) ? GUC_INTR_GUC2HOST : 0;
u32 gsc_mask = 0;
u32 heci_mask = 0;
u32 dmask;
u32 smask;
if (!intel_uc_wants_guc_submission(>->uc))
irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
GT_CONTEXT_SWITCH_INTERRUPT |
GT_WAIT_SEMAPHORE_INTERRUPT;
dmask = irqs << 16 | irqs;
smask = irqs << 16;
if (HAS_ENGINE(gt, GSC0)) {
/*
* the heci2 interrupt is enabled via the same register as the
* GSC interrupt, but it has its own mask register.
*/
gsc_mask = irqs;
heci_mask = GSC_IRQ_INTF(1); /* HECI2 IRQ for SW Proxy */
} else if (HAS_HECI_GSC(gt->i915)) {
gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);
}
BUILD_BUG_ON(irqs & 0xffff0000);
/* Enable RCS, BCS, VCS and VECS class interrupts. */
intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
if (CCS_MASK(gt))
intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
if (gsc_mask)
intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, gsc_mask | heci_mask);
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
if (HAS_ENGINE(gt, BCS1) || HAS_ENGINE(gt, BCS2))
intel_uncore_write(uncore, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, BCS3) || HAS_ENGINE(gt, BCS4))
intel_uncore_write(uncore, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, BCS5) || HAS_ENGINE(gt, BCS6))
intel_uncore_write(uncore, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, BCS7) || HAS_ENGINE(gt, BCS8))
intel_uncore_write(uncore, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
if (gsc_mask)
intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);
if (heci_mask)
intel_uncore_write(uncore, GEN12_HECI2_RSVD_INTR_MASK,
~REG_FIELD_PREP(ENGINE1_MASK, heci_mask));
if (guc_mask) {
/* the enable bit is common for both GTs but the masks are separate */
u32 mask = gt->type == GT_MEDIA ?
REG_FIELD_PREP(ENGINE0_MASK, guc_mask) :
REG_FIELD_PREP(ENGINE1_MASK, guc_mask);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE,
REG_FIELD_PREP(ENGINE1_MASK, guc_mask));
/* we might not be the first GT to write this reg */
intel_uncore_rmw(uncore, MTL_GUC_MGUC_INTR_MASK, mask, 0);
}
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled.
*/
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
}
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
gt_iir);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
gt_iir);
}
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
if (!HAS_L3_DPF(gt->i915))
return;
spin_lock(gt->irq_lock);
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
spin_unlock(gt->irq_lock);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
gt->i915->l3_parity.which_slice |= 1 << 1;
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
gt->i915->l3_parity.which_slice |= 1 << 0;
queue_work(gt->i915->unordered_wq, >->i915->l3_parity.error_work);
}
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
gt_iir);
if (gt_iir & GT_BSD_USER_INTERRUPT)
intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
gt_iir >> 12);
if (gt_iir & GT_BLT_USER_INTERRUPT)
intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
gt_iir >> 22);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_CS_MASTER_ERROR_INTERRUPT))
gt_dbg(gt, "Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir);
}
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
void __iomem * const regs = intel_uncore_regs(gt->uncore);
u32 iir;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(0));
if (likely(iir)) {
intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
iir >> GEN8_RCS_IRQ_SHIFT);
intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
iir >> GEN8_BCS_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(0), iir);
}
}
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(iir)) {
intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
iir >> GEN8_VCS0_IRQ_SHIFT);
intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
iir >> GEN8_VCS1_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(1), iir);
}
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
iir = raw_reg_read(regs, GEN8_GT_IIR(3));
if (likely(iir)) {
intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
iir >> GEN8_VECS_IRQ_SHIFT);
raw_reg_write(regs, GEN8_GT_IIR(3), iir);
}
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
iir = raw_reg_read(regs, GEN8_GT_IIR(2));
if (likely(iir)) {
gen6_rps_irq_handler(>->rps, iir);
guc_irq_handler(>->uc.guc, iir >> 16);
raw_reg_write(regs, GEN8_GT_IIR(2), iir);
}
}
}
void gen8_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
GEN8_IRQ_RESET_NDX(uncore, GT, 0);
GEN8_IRQ_RESET_NDX(uncore, GT, 1);
GEN8_IRQ_RESET_NDX(uncore, GT, 2);
GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
/* These are interrupts we'll toggle with the ring mask register */
const u32 irqs =
GT_CS_MASTER_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT |
GT_CONTEXT_SWITCH_INTERRUPT |
GT_WAIT_SEMAPHORE_INTERRUPT;
const u32 gt_interrupts[] = {
irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
0,
irqs << GEN8_VECS_IRQ_SHIFT,
};
struct intel_uncore *uncore = gt->uncore;
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. The same will be the case for GuC interrupts.
*/
GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen5_gt_update_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
lockdep_assert_held(gt->irq_lock);
GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
gt->gt_imr &= ~interrupt_mask;
gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}
void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
gen5_gt_update_irq(gt, mask, mask);
intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}
void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
gen5_gt_update_irq(gt, mask, 0);
}
void gen5_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
GEN3_IRQ_RESET(uncore, GT);
if (GRAPHICS_VER(gt->i915) >= 6)
GEN3_IRQ_RESET(uncore, GEN6_PM);
}
void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
u32 pm_irqs = 0;
u32 gt_irqs = 0;
gt->gt_imr = ~0;
if (HAS_L3_DPF(gt->i915)) {
/* L3 parity interrupt is always unmasked. */
gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
gt_irqs |= GT_PARITY_ERROR(gt->i915);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
if (GRAPHICS_VER(gt->i915) == 5)
gt_irqs |= ILK_BSD_USER_INTERRUPT;
else
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
if (GRAPHICS_VER(gt->i915) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
if (HAS_ENGINE(gt, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
gt->pm_imr = 0xffffffff;
GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
}
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_irq.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_gt.h"
#include "intel_timeline.h"
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
unsigned int space;
space = __intel_ring_space(ring->head, ring->emit, ring->size);
ring->space = space;
return space;
}
void __intel_ring_pin(struct intel_ring *ring)
{
GEM_BUG_ON(!atomic_read(&ring->pin_count));
atomic_inc(&ring->pin_count);
}
int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
struct i915_vma *vma = ring->vma;
unsigned int flags;
void *addr;
int ret;
if (atomic_fetch_inc(&ring->pin_count))
return 0;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
if (i915_gem_object_is_stolen(vma->obj))
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
ret = i915_ggtt_pin(vma, ww, 0, flags);
if (unlikely(ret))
goto err_unpin;
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
addr = (void __force *)i915_vma_pin_iomap(vma);
} else {
int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);
addr = i915_gem_object_pin_map(vma->obj, type);
}
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err_ring;
}
i915_vma_make_unshrinkable(vma);
/* Discard any unused bytes beyond those submitted to hw. */
intel_ring_reset(ring, ring->emit);
ring->vaddr = addr;
return 0;
err_ring:
i915_vma_unpin(vma);
err_unpin:
atomic_dec(&ring->pin_count);
return ret;
}
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
tail = intel_ring_wrap(ring, tail);
ring->tail = tail;
ring->head = tail;
ring->emit = tail;
intel_ring_update_space(ring);
}
void intel_ring_unpin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
if (!atomic_dec_and_test(&ring->pin_count))
return;
i915_vma_unset_ggtt_write(vma);
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
i915_vma_unpin_iomap(vma);
else
i915_gem_object_unpin_map(vma->obj);
i915_vma_make_purgeable(vma);
i915_vma_unpin(vma);
}
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_private *i915 = vm->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
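/* Prefer device local memory, fall back to mappable stolen memory, then internal system pages */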
obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
/*
* Mark ring buffers as read-only from GPU side (so no stray overwrites)
* if supported by the platform's GGTT.
*/
if (vm->has_read_only)
i915_gem_object_set_readonly(obj);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
goto err;
return vma;
err:
i915_gem_object_put(obj);
return vma;
}
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_ring *ring;
struct i915_vma *vma;
GEM_BUG_ON(!is_power_of_2(size));
GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return ERR_PTR(-ENOMEM);
kref_init(&ring->ref);
ring->size = size;
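/* Shift used to scale ring offsets up to the full u32 range for wrap-safe position comparisons */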
ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
/*
* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
ring->effective_size = size;
if (IS_I830(i915) || IS_I845G(i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
intel_ring_update_space(ring);
vma = create_ring_vma(engine->gt->ggtt, size);
if (IS_ERR(vma)) {
kfree(ring);
return ERR_CAST(vma);
}
ring->vma = vma;
return ring;
}
void intel_ring_free(struct kref *ref)
{
struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
i915_vma_put(ring->vma);
kfree(ring);
}
static noinline int
wait_for_space(struct intel_ring *ring,
struct intel_timeline *tl,
unsigned int bytes)
{
struct i915_request *target;
long timeout;
if (intel_ring_update_space(ring) >= bytes)
return 0;
GEM_BUG_ON(list_empty(&tl->requests));
list_for_each_entry(target, &tl->requests, link) {
if (target->ring != ring)
continue;
/* Would completion of this request free enough space? */
if (bytes <= __intel_ring_space(target->postfix,
ring->emit, ring->size))
break;
}
if (GEM_WARN_ON(&target->link == &tl->requests))
return -ENOSPC;
timeout = i915_request_wait(target,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0)
return timeout;
i915_request_retire_upto(target);
intel_ring_update_space(ring);
GEM_BUG_ON(ring->space < bytes);
return 0;
}
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
struct intel_ring *ring = rq->ring;
const unsigned int remain_usable = ring->effective_size - ring->emit;
const unsigned int bytes = num_dwords * sizeof(u32);
unsigned int need_wrap = 0;
unsigned int total_bytes;
u32 *cs;
/* Packets must be qword aligned. */
GEM_BUG_ON(num_dwords & 1);
total_bytes = bytes + rq->reserved_space;
GEM_BUG_ON(total_bytes > ring->effective_size);
if (unlikely(total_bytes > remain_usable)) {
const int remain_actual = ring->size - ring->emit;
if (bytes > remain_usable) {
/*
* Not enough space for the basic request. So need to
* flush out the remainder and then wait for
* base + reserved.
*/
total_bytes += remain_actual;
need_wrap = remain_actual | 1;
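/* Bit 0 flags that a wrap is needed; it is cleared again before the memset below */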
} else {
/*
* The base request will fit but the reserved space
* falls off the end. So we don't need an immediate
* wrap and only need to effectively wait for the
* reserved size from the start of ringbuffer.
*/
total_bytes = rq->reserved_space + remain_actual;
}
}
if (unlikely(total_bytes > ring->space)) {
int ret;
/*
* Space is reserved in the ringbuffer for finalising the
* request, as that cannot be allowed to fail. During request
* finalisation, reserved_space is set to 0 to stop the
* overallocation and the assumption is that then we never need
* to wait (which has the risk of failing with EINTR).
*
* See also i915_request_alloc() and i915_request_add().
*/
GEM_BUG_ON(!rq->reserved_space);
ret = wait_for_space(ring,
i915_request_timeline(rq),
total_bytes);
if (unlikely(ret))
return ERR_PTR(ret);
}
if (unlikely(need_wrap)) {
need_wrap &= ~1;
GEM_BUG_ON(need_wrap > ring->space);
GEM_BUG_ON(ring->emit + need_wrap > ring->size);
GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
/* Fill the tail with MI_NOOP */
memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
ring->space -= need_wrap;
ring->emit = 0;
}
GEM_BUG_ON(ring->emit > ring->size - bytes);
GEM_BUG_ON(ring->space < bytes);
cs = ring->vaddr + ring->emit;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
ring->emit += bytes;
ring->space -= bytes;
return cs;
}
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
int num_dwords;
void *cs;
num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
if (num_dwords == 0)
return 0;
num_dwords = CACHELINE_DWORDS - num_dwords;
GEM_BUG_ON(num_dwords & 1);
cs = intel_ring_begin(rq, num_dwords);
if (IS_ERR(cs))
return PTR_ERR(cs);
memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
intel_ring_advance(rq, cs + num_dwords);
GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_ring.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_gpu_commands.h"
#include "intel_gt_regs.h"
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
struct cb_kernel {
const void *data;
u32 size;
};
#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
#include "ivb_clear_kernel.c"
static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
#include "hsw_clear_kernel.c"
static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
struct batch_chunk {
struct i915_vma *vma;
u32 offset;
u32 *start;
u32 *end;
u32 max_items;
};
struct batch_vals {
u32 max_threads;
u32 state_start;
u32 surface_start;
u32 surface_height;
u32 surface_width;
u32 size;
};
static int num_primitives(const struct batch_vals *bv)
{
/*
* We need to saturate the GPU with work in order to dispatch
* a shader on every HW thread, and clear the thread-local registers.
* In short, we have to dispatch work faster than the shaders can
* run in order to fill the EU and occupy each HW thread.
*/
return bv->max_threads;
}
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
if (IS_HASWELL(i915)) {
switch (INTEL_INFO(i915)->gt) {
default:
case 1:
bv->max_threads = 70;
break;
case 2:
bv->max_threads = 140;
break;
case 3:
bv->max_threads = 280;
break;
}
bv->surface_height = 16 * 16;
bv->surface_width = 32 * 2 * 16;
} else {
switch (INTEL_INFO(i915)->gt) {
default:
case 1: /* including vlv */
bv->max_threads = 36;
break;
case 2:
bv->max_threads = 128;
break;
}
bv->surface_height = 16 * 8;
bv->surface_width = 32 * 16;
}
bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
bv->surface_start = bv->state_start + SZ_4K;
bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
static void batch_init(struct batch_chunk *bc,
struct i915_vma *vma,
u32 *start, u32 offset, u32 max_bytes)
{
bc->vma = vma;
bc->offset = offset;
bc->start = start + bc->offset / sizeof(*bc->start);
bc->end = bc->start;
bc->max_items = max_bytes / sizeof(*bc->start);
}
static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
{
return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
}
static u32 batch_addr(const struct batch_chunk *bc)
{
return i915_vma_offset(bc->vma);
}
static void batch_add(struct batch_chunk *bc, const u32 d)
{
GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
*bc->end++ = d;
}
static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
{
u32 *map;
if (align) {
u32 *end = PTR_ALIGN(bc->end, align);
memset32(bc->end, 0, end - bc->end);
bc->end = end;
}
map = bc->end;
bc->end += items;
return map;
}
static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
{
GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
}
static u32
gen7_fill_surface_state(struct batch_chunk *state,
const u32 dst_offset,
const struct batch_vals *bv)
{
u32 surface_h = bv->surface_height;
u32 surface_w = bv->surface_width;
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
#define SURFACE_2D 1
#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
#define RENDER_CACHE_READ_WRITE 1
*cs++ = SURFACE_2D << 29 |
(SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
(RENDER_CACHE_READ_WRITE << 8);
*cs++ = batch_addr(state) + dst_offset;
*cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
*cs++ = surface_w;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
#define SHADER_CHANNELS(r, g, b, a) \
(((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
*cs++ = SHADER_CHANNELS(4, 5, 6, 7);
batch_advance(state, cs);
return offset;
}
static u32
gen7_fill_binding_table(struct batch_chunk *state,
const struct batch_vals *bv)
{
u32 surface_start =
gen7_fill_surface_state(state, bv->surface_start, bv);
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
*cs++ = surface_start - state->offset;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
batch_advance(state, cs);
return offset;
}
static u32
gen7_fill_kernel_data(struct batch_chunk *state,
const u32 *data,
const u32 size)
{
return batch_offset(state,
memcpy(batch_alloc_bytes(state, 64, size),
data, size));
}
static u32
gen7_fill_interface_descriptor(struct batch_chunk *state,
const struct batch_vals *bv,
const struct cb_kernel *kernel,
unsigned int count)
{
u32 kernel_offset =
gen7_fill_kernel_data(state, kernel->data, kernel->size);
u32 binding_table = gen7_fill_binding_table(state, bv);
u32 *cs = batch_alloc_items(state, 32, 8 * count);
u32 offset = batch_offset(state, cs);
*cs++ = kernel_offset;
*cs++ = (1 << 7) | (1 << 13);
*cs++ = 0;
*cs++ = (binding_table - state->offset) | 1;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
/* Entries 1..(count - 1) are dummy interface descriptors */
memset32(cs, 0x00, (count - 1) * 8);
batch_advance(state, cs + (count - 1) * 8);
return offset;
}
static void
gen7_emit_state_base_address(struct batch_chunk *batch,
u32 surface_state_base)
{
u32 *cs = batch_alloc_items(batch, 0, 10);
*cs++ = STATE_BASE_ADDRESS | (10 - 2);
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
*cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
/* dynamic */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* indirect */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* instruction */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* general/dynamic/indirect/instruction access Bound */
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
batch_advance(batch, cs);
}
static void
gen7_emit_vfe_state(struct batch_chunk *batch,
const struct batch_vals *bv,
u32 urb_size, u32 curbe_size,
u32 mode)
{
u32 threads = bv->max_threads - 1;
u32 *cs = batch_alloc_items(batch, 32, 8);
*cs++ = MEDIA_VFE_STATE | (8 - 2);
/* scratch buffer */
*cs++ = 0;
/* number of threads & urb entries for GPGPU vs Media Mode */
*cs++ = threads << 16 | 1 << 8 | mode << 2;
*cs++ = 0;
/* urb entry size & curbe size in 256 bits unit */
*cs++ = urb_size << 16 | curbe_size;
/* scoreboard */
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
static void
gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
const u32 interface_descriptor,
unsigned int count)
{
u32 *cs = batch_alloc_items(batch, 8, 4);
*cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
*cs++ = 0;
*cs++ = count * 8 * sizeof(*cs);
/*
* interface descriptor address - relative to the dynamic state base
* address
*/
*cs++ = interface_descriptor;
batch_advance(batch, cs);
}
static void
gen7_emit_media_object(struct batch_chunk *batch,
unsigned int media_object_index)
{
unsigned int x_offset = (media_object_index % 16) * 64;
unsigned int y_offset = (media_object_index / 16) * 16;
unsigned int pkt = 6 + 3;
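/* 6 dwords of MEDIA_OBJECT header/fixed fields plus 3 dwords of inline data */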
u32 *cs;
cs = batch_alloc_items(batch, 8, pkt);
*cs++ = MEDIA_OBJECT | (pkt - 2);
/* interface descriptor offset */
*cs++ = 0;
/* without indirect data */
*cs++ = 0;
*cs++ = 0;
/* scoreboard */
*cs++ = 0;
*cs++ = 0;
/* inline */
*cs++ = y_offset << 16 | x_offset;
*cs++ = 0;
*cs++ = GT3_INLINE_DATA_DELAYS;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
u32 *cs = batch_alloc_items(batch, 0, 4);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
u32 *cs = batch_alloc_items(batch, 0, 10);
/* ivb: Stall before STATE_CACHE_INVALIDATE */
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
static void emit_batch(struct i915_vma * const vma,
u32 *start,
const struct batch_vals *bv)
{
struct drm_i915_private *i915 = vma->vm->i915;
const unsigned int desc_count = 1;
const unsigned int urb_size = 1;
struct batch_chunk cmds, state;
u32 descriptors;
unsigned int i;
batch_init(&cmds, vma, start, 0, bv->state_start);
batch_init(&state, vma, start, bv->state_start, SZ_4K);
descriptors = gen7_fill_interface_descriptor(&state, bv,
IS_HASWELL(i915) ?
&cb_kernel_hsw :
&cb_kernel_ivb,
desc_count);
/* Reset inherited context registers */
gen7_emit_pipeline_flush(&cmds);
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
batch_add(&cmds, 0xffff0000 |
((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
HIZ_RAW_STALL_OPT_DISABLE :
0));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_pipeline_flush(&cmds);
/* Switch to the media pipeline and our base address */
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_pipeline_flush(&cmds);
gen7_emit_state_base_address(&cmds, descriptors);
gen7_emit_pipeline_invalidate(&cmds);
/* Set the clear-residual kernel state */
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
/* Execute the kernel on all HW threads */
for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
batch_add(&cmds, MI_BATCH_BUFFER_END);
}
int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
struct i915_vma * const vma)
{
struct batch_vals bv;
u32 *batch;
batch_get_defaults(engine->i915, &bv);
if (!vma)
return bv.size;
GEM_BUG_ON(vma->obj->base.size < bv.size);
batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(batch))
return PTR_ERR(batch);
emit_batch(vma, memset(batch, 0, bv.size), &bv);
i915_gem_object_flush_map(vma->obj);
__i915_gem_object_release_map(vma->obj);
return 0;
}
| linux-master | drivers/gpu/drm/i915/gt/gen7_renderclear.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/bitmap.h>
#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_regs.h"
#include "intel_sseu_debugfs.h"
static void cherryview_sseu_device_status(struct intel_gt *gt,
struct sseu_dev_info *sseu)
{
#define SS_MAX 2
struct intel_uncore *uncore = gt->uncore;
const int ss_max = SS_MAX;
u32 sig1[SS_MAX], sig2[SS_MAX];
int ss;
sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1);
sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1);
sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2);
sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2);
for (ss = 0; ss < ss_max; ss++) {
unsigned int eu_cnt;
if (sig1[ss] & CHV_SS_PG_ENABLE)
/* skip disabled subslice */
continue;
sseu->slice_mask = BIT(0);
sseu->subslice_mask.hsw[0] |= BIT(ss);
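/* Each *_PG_ENABLE bit power-gates a pair of EUs */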
eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
sseu->eu_total += eu_cnt;
sseu->eu_per_subslice = max_t(unsigned int,
sseu->eu_per_subslice, eu_cnt);
}
#undef SS_MAX
}
static void gen11_sseu_device_status(struct intel_gt *gt,
struct sseu_dev_info *sseu)
{
#define SS_MAX 8
struct intel_uncore *uncore = gt->uncore;
const struct intel_gt_info *info = &gt->info;
u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
for (s = 0; s < info->sseu.max_slices; s++) {
/*
* FIXME: the valid SS mask follows the spec and reads only the
* valid (non-reserved) bits of these registers, although this
* seems wrong because it would leave many subslices without an
* ACK.
*/
s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) &
GEN10_PGCTL_VALID_SS_MASK(s);
eu_reg[2 * s] = intel_uncore_read(uncore,
GEN10_SS01_EU_PGCTL_ACK(s));
eu_reg[2 * s + 1] = intel_uncore_read(uncore,
GEN10_SS23_EU_PGCTL_ACK(s));
}
eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
GEN9_PGCTL_SSA_EU19_ACK |
GEN9_PGCTL_SSA_EU210_ACK |
GEN9_PGCTL_SSA_EU311_ACK;
eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
GEN9_PGCTL_SSB_EU19_ACK |
GEN9_PGCTL_SSB_EU210_ACK |
GEN9_PGCTL_SSB_EU311_ACK;
for (s = 0; s < info->sseu.max_slices; s++) {
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
/* skip disabled slice */
continue;
sseu->slice_mask |= BIT(s);
sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
if (info->sseu.has_subslice_pg &&
!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
eu_mask[ss % 2]);
sseu->eu_total += eu_cnt;
sseu->eu_per_subslice = max_t(unsigned int,
sseu->eu_per_subslice,
eu_cnt);
}
}
#undef SS_MAX
}
static void gen9_sseu_device_status(struct intel_gt *gt,
struct sseu_dev_info *sseu)
{
#define SS_MAX 3
struct intel_uncore *uncore = gt->uncore;
const struct intel_gt_info *info = &gt->info;
u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
for (s = 0; s < info->sseu.max_slices; s++) {
s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s));
eu_reg[2 * s] =
intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s));
eu_reg[2 * s + 1] =
intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s));
}
eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
GEN9_PGCTL_SSA_EU19_ACK |
GEN9_PGCTL_SSA_EU210_ACK |
GEN9_PGCTL_SSA_EU311_ACK;
eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
GEN9_PGCTL_SSB_EU19_ACK |
GEN9_PGCTL_SSB_EU210_ACK |
GEN9_PGCTL_SSB_EU311_ACK;
for (s = 0; s < info->sseu.max_slices; s++) {
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
/* skip disabled slice */
continue;
sseu->slice_mask |= BIT(s);
if (IS_GEN9_BC(gt->i915))
sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
if (IS_GEN9_LP(gt->i915)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
sseu->subslice_mask.hsw[s] |= BIT(ss);
}
eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2];
eu_cnt = 2 * hweight32(eu_cnt);
sseu->eu_total += eu_cnt;
sseu->eu_per_subslice = max_t(unsigned int,
sseu->eu_per_subslice,
eu_cnt);
}
}
#undef SS_MAX
}
static void bdw_sseu_device_status(struct intel_gt *gt,
struct sseu_dev_info *sseu)
{
const struct intel_gt_info *info = &gt->info;
u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO);
int s;
sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
if (sseu->slice_mask) {
sseu->eu_per_subslice = info->sseu.eu_per_subslice;
for (s = 0; s < fls(sseu->slice_mask); s++)
sseu->subslice_mask.hsw[s] = info->sseu.subslice_mask.hsw[s];
sseu->eu_total = sseu->eu_per_subslice *
intel_sseu_subslice_total(sseu);
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
u8 subslice_7eu = info->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
}
}
static void i915_print_sseu_info(struct seq_file *m,
bool is_available_info,
bool has_pooled_eu,
const struct sseu_dev_info *sseu)
{
const char *type = is_available_info ? "Available" : "Enabled";
seq_printf(m, " %s Slice Mask: %04x\n", type,
sseu->slice_mask);
seq_printf(m, " %s Slice Total: %u\n", type,
hweight8(sseu->slice_mask));
seq_printf(m, " %s Subslice Total: %u\n", type,
intel_sseu_subslice_total(sseu));
intel_sseu_print_ss_info(type, sseu, m);
seq_printf(m, " %s EU Total: %u\n", type,
sseu->eu_total);
seq_printf(m, " %s EU Per Subslice: %u\n", type,
sseu->eu_per_subslice);
if (!is_available_info)
return;
seq_printf(m, " Has Pooled EU: %s\n", str_yes_no(has_pooled_eu));
if (has_pooled_eu)
seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
seq_printf(m, " Has Slice Power Gating: %s\n",
str_yes_no(sseu->has_slice_pg));
seq_printf(m, " Has Subslice Power Gating: %s\n",
str_yes_no(sseu->has_subslice_pg));
seq_printf(m, " Has EU Power Gating: %s\n",
str_yes_no(sseu->has_eu_pg));
}
/*
* this is called from top-level debugfs as well, so we can't get the gt from
* the seq_file.
*/
int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
const struct intel_gt_info *info = &gt->info;
struct sseu_dev_info *sseu;
intel_wakeref_t wakeref;
if (GRAPHICS_VER(i915) < 8)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);
seq_puts(m, "SSEU Device Status\n");
sseu = kzalloc(sizeof(*sseu), GFP_KERNEL);
if (!sseu)
return -ENOMEM;
intel_sseu_set_info(sseu, info->sseu.max_slices,
info->sseu.max_subslices,
info->sseu.max_eus_per_subslice);
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(i915))
cherryview_sseu_device_status(gt, sseu);
else if (IS_BROADWELL(i915))
bdw_sseu_device_status(gt, sseu);
else if (GRAPHICS_VER(i915) == 9)
gen9_sseu_device_status(gt, sseu);
else if (GRAPHICS_VER(i915) >= 11)
gen11_sseu_device_status(gt, sseu);
}
i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), sseu);
kfree(sseu);
return 0;
}
static int sseu_status_show(struct seq_file *m, void *unused)
{
struct intel_gt *gt = m->private;
return intel_sseu_status(m, gt);
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status);
static int sseu_topology_show(struct seq_file *m, void *unused)
{
struct intel_gt *gt = m->private;
struct drm_printer p = drm_seq_file_printer(m);
intel_sseu_print_topology(gt->i915, &gt->info.sseu, &p);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_topology);
void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "sseu_status", &sseu_status_fops, NULL },
{ "sseu_topology", &sseu_topology_fops, NULL },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "gen6_engine_cs.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_ring.h"
#define HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH * sizeof(u32))
/*
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
*
* [DevSNB-C+{W/A}] Before any depth stall flush (including those
* produced by non-pipelined state commands), software needs to first
* send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
* 0.
*
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
* =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
*
* And the workaround for these two requires this workaround first:
*
* [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
* BEFORE the pipe-control with a post-sync op and no write-cache
* flushes.
*
* And this last workaround is tricky because of the requirements on
* that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
* volume 2 part 1:
*
* "1 of the following must also be set:
* - Render Target Cache Flush Enable ([12] of DW1)
* - Depth Cache Flush Enable ([0] of DW1)
* - Stall at Pixel Scoreboard ([1] of DW1)
* - Depth Stall ([13] of DW1)
* - Post-Sync Operation ([13] of DW1)
* - Notify Enable ([8] of DW1)"
*
* The cache flushes require the workaround flush that triggered this
* one, so we can't use it. Depth stall would trigger the same.
* Post-sync nonzero is what triggered this second workaround, so we
* can't use that one either. Notify enable is IRQs, which aren't
* really our business. That leaves only stall at scoreboard.
*/
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0; /* low dword */
*cs++ = 0; /* high dword */
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_QW_WRITE;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
ret = gen6_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
/*
* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact. And when rearranging requests, the order of flushes is
* unknown.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/*
* Ensure that any following seqno writes only happen
* when the render cache is indeed flushed.
*/
flags |= PIPE_CONTROL_CS_STALL;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = flags;
*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
u32 *gen6_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_QW_WRITE;
*cs++ = intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_DEFAULT) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
/* Finally we can flush and with it emit the breadcrumb */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_seqno(rq) |
PIPE_CONTROL_GLOBAL_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
u32 cmd, *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
/*
* We always require a command barrier so that subsequent
* commands, such as breadcrumb interrupts, are strictly ordered
* wrt the contents of the write cache being flushed to memory
* (and thus being coherent from the CPU).
*/
cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
cmd |= flags;
*cs++ = cmd;
*cs++ = HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}
int gen6_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
int gen6_emit_flush_vcs(struct i915_request *rq, u32 mode)
{
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}
int gen6_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 security;
u32 *cs;
security = MI_BATCH_NON_SECURE_I965;
if (dispatch_flags & I915_DISPATCH_SECURE)
security = 0;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = __gen6_emit_bb_start(cs, offset, security);
intel_ring_advance(rq, cs);
return 0;
}
int
hsw_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
u32 security;
u32 *cs;
security = MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW;
if (dispatch_flags & I915_DISPATCH_SECURE)
security = 0;
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
cs = __gen6_emit_bb_start(cs, offset, security);
intel_ring_advance(rq, cs);
return 0;
}
static int gen7_stall_cs(struct i915_request *rq)
{
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
*cs++ = 0;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
int gen7_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
u32 scratch_addr =
intel_gt_scratch_offset(rq->engine->gt,
INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
/*
* Ensure that any following seqno writes only happen when the render
* cache is indeed flushed.
*
* Workaround: 4th PIPE_CONTROL command (except the ones with only
* read-cache invalidate bits set) must have the CS_STALL bit set. We
* don't try to be clever and just set it unconditionally.
*/
flags |= PIPE_CONTROL_CS_STALL;
/*
* CS_STALL suggests at least a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/*
* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
flags |= PIPE_CONTROL_FLUSH_ENABLE;
}
if (mode & EMIT_INVALIDATE) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set.
*/
gen7_stall_cs(rq);
}
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = flags;
*cs++ = scratch_addr;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
u32 *gen7_emit_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL);
*cs++ = i915_request_active_seqno(rq);
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
*cs++ = MI_USER_INTERRUPT;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#define GEN7_XCS_WA 32
u32 *gen7_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
int i;
GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR);
*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
*cs++ = rq->fence.seqno;
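/* w/a: repeat the seqno store GEN7_XCS_WA times before raising the user interrupt */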
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
*cs++ = I915_GEM_HWS_SEQNO_ADDR;
*cs++ = rq->fence.seqno;
}
*cs++ = MI_FLUSH_DW;
*cs++ = 0;
*cs++ = 0;
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
return cs;
}
#undef GEN7_XCS_WA
void gen6_irq_enable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR,
~(engine->irq_enable_mask | engine->irq_keep_mask));
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
void gen6_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
void hsw_irq_enable_vecs(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}
void hsw_irq_disable_vecs(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~0);
gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}
| linux-master | drivers/gpu/drm/i915/gt/gen6_engine_cs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corporation
*/
#include <linux/prime_numbers.h>
#include "gem/i915_gem_internal.h"
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "selftest_engine_heartbeat.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"
#include "shmem_utils.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
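/* MI_LOAD_REGISTER_IMM opcode (0x22) with a zero length field */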
#define LRI_HEADER MI_INSTR(0x22, 0)
#define LRI_LENGTH_MASK GENMASK(7, 0)
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
}
static bool is_active(struct i915_request *rq)
{
if (i915_request_is_active(rq))
return true;
if (i915_request_on_hold(rq))
return true;
if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
return true;
return false;
}
static int wait_for_submit(struct intel_engine_cs *engine,
struct i915_request *rq,
unsigned long timeout)
{
/* Ignore our own attempts to suppress excess tasklets */
tasklet_hi_schedule(&engine->sched_engine->tasklet);
timeout += jiffies;
do {
bool done = time_after(jiffies, timeout);
if (i915_request_completed(rq)) /* that was quick! */
return 0;
/* Wait until the HW has acknowledged the submission (or err) */
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
if (done)
return -ETIME;
cond_resched();
} while (1);
}
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
const u32 offset =
i915_ggtt_offset(ce->engine->status_page.vma) +
offset_in_page(slot);
struct i915_request *rq;
u32 *cs;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = offset;
*cs++ = 0;
*cs++ = 1;
intel_ring_advance(rq, cs);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_add(rq);
return 0;
}
static int context_flush(struct intel_context *ce, long timeout)
{
struct i915_request *rq;
struct dma_fence *fence;
int err = 0;
rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
fence = i915_active_fence_get(&ce->timeline->last_request);
if (fence) {
i915_request_await_dma_fence(rq, fence);
dma_fence_put(fence);
}
rq = i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, timeout) < 0)
err = -ETIME;
i915_request_put(rq);
rmb(); /* We know the request is written, make sure all state is too! */
return err;
}
static int get_lri_mask(struct intel_engine_cs *engine, u32 lri)
{
if ((lri & MI_LRI_LRM_CS_MMIO) == 0)
return ~0u;
if (GRAPHICS_VER(engine->i915) < 12)
return 0xfff;
switch (engine->class) {
default:
case RENDER_CLASS:
case COMPUTE_CLASS:
return 0x07ff;
case COPY_ENGINE_CLASS:
return 0x0fff;
case VIDEO_DECODE_CLASS:
case VIDEO_ENHANCEMENT_CLASS:
return 0x3fff;
}
}
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
u32 *lrc;
int err;
/*
* Check the registers offsets we use to create the initial reg state
* match the layout saved by HW.
*/
lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */
if (!lrc)
return -ENOMEM;
GEM_BUG_ON(offset_in_page(lrc));
err = 0;
for_each_engine(engine, gt, id) {
u32 *hw;
int dw;
if (!engine->default_state)
continue;
hw = shmem_pin_map(engine->default_state);
if (!hw) {
err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
__lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE),
engine->kernel_context, engine, true);
dw = 0;
do {
u32 lri = READ_ONCE(hw[dw]);
u32 lri_mask;
if (lri == 0) {
dw++;
continue;
}
if (lrc[dw] == 0) {
pr_debug("%s: skipped instruction %x at dword %d\n",
engine->name, lri, dw);
dw++;
continue;
}
if ((lri & GENMASK(31, 23)) != LRI_HEADER) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
err = -EINVAL;
break;
}
if (lrc[dw] != lri) {
pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n",
engine->name, dw, lri, lrc[dw]);
err = -EINVAL;
break;
}
/*
* When bit 19 of MI_LOAD_REGISTER_IMM instruction
* opcode is set on Gen12+ devices, HW does not
* care about certain register address offsets, and
* instead checks the following for valid address
* ranges on specific engines:
* RCS && CCS: BITS(0 - 10)
* BCS: BITS(0 - 11)
* VECS && VCS: BITS(0 - 13)
*/
lri_mask = get_lri_mask(engine, lri);
lri &= 0x7f;
lri++;
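/* lri now counts the payload dwords, i.e. the (offset, value) pairs that follow */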
dw++;
while (lri) {
u32 offset = READ_ONCE(hw[dw]);
if ((offset ^ lrc[dw]) & lri_mask) {
pr_err("%s: Different registers found at dword %d, expected %x, found %x\n",
engine->name, dw, offset, lrc[dw]);
err = -EINVAL;
break;
}
/*
* Skip over the actual register value as we
* expect that to differ.
*/
dw += 2;
lri -= 2;
}
} while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
if (err) {
pr_info("%s: HW register image:\n", engine->name);
igt_hexdump(hw, PAGE_SIZE);
pr_info("%s: SW register image:\n", engine->name);
igt_hexdump(lrc, PAGE_SIZE);
}
shmem_unpin_map(engine->default_state, hw);
if (err)
break;
}
free_page((unsigned long)lrc);
return err;
}
static int find_offset(const u32 *lri, u32 offset)
{
int i;
for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
if (lri[i] == offset)
return i;
return -1;
}
static int live_lrc_fixed(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* Check the assumed register offsets match the actual locations in
* the context image.
*/
for_each_engine(engine, gt, id) {
const struct {
u32 reg;
u32 offset;
const char *name;
} tbl[] = {
{
i915_mmio_reg_offset(RING_START(engine->mmio_base)),
CTX_RING_START - 1,
"RING_START"
},
{
i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
CTX_RING_CTL - 1,
"RING_CTL"
},
{
i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)),
CTX_RING_HEAD - 1,
"RING_HEAD"
},
{
i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)),
CTX_RING_TAIL - 1,
"RING_TAIL"
},
{
i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)),
lrc_ring_mi_mode(engine),
"RING_MI_MODE"
},
{
i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
CTX_BB_STATE - 1,
"BB_STATE"
},
{
i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)),
lrc_ring_wa_bb_per_ctx(engine),
"RING_BB_PER_CTX_PTR"
},
{
i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)),
lrc_ring_indirect_ptr(engine),
"RING_INDIRECT_CTX_PTR"
},
{
i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)),
lrc_ring_indirect_offset(engine),
"RING_INDIRECT_CTX_OFFSET"
},
{
i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
CTX_TIMESTAMP - 1,
"RING_CTX_TIMESTAMP"
},
{
i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)),
lrc_ring_gpr0(engine),
"RING_CS_GPR0"
},
{
i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)),
lrc_ring_cmd_buf_cctl(engine),
"RING_CMD_BUF_CCTL"
},
{
i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)),
lrc_ring_bb_offset(engine),
"RING_BB_OFFSET"
},
{ },
}, *t;
u32 *hw;
if (!engine->default_state)
continue;
hw = shmem_pin_map(engine->default_state);
if (!hw) {
err = -ENOMEM;
break;
}
hw += LRC_STATE_OFFSET / sizeof(*hw);
for (t = tbl; t->name; t++) {
int dw = find_offset(hw, t->reg);
if (dw != t->offset) {
pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n",
engine->name,
t->name,
t->reg,
dw,
t->offset);
err = -EINVAL;
}
}
shmem_unpin_map(engine->default_state, hw);
}
return err;
}
static int __live_lrc_state(struct intel_engine_cs *engine,
struct i915_vma *scratch)
{
struct intel_context *ce;
struct i915_request *rq;
struct i915_gem_ww_ctx ww;
enum {
RING_START_IDX = 0,
RING_TAIL_IDX,
MAX_IDX
};
u32 expected[MAX_IDX];
u32 *cs;
int err;
int n;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_gem_object_lock(scratch->obj, &ww);
if (!err)
err = intel_context_pin_ww(ce, &ww);
if (err)
goto err_put;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
cs = intel_ring_begin(rq, 4 * MAX_IDX);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(rq);
goto err_unpin;
}
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base));
*cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32);
*cs++ = 0;
expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base));
*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
*cs++ = 0;
err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
i915_request_get(rq);
i915_request_add(rq);
if (err)
goto err_rq;
intel_engine_flush_submission(engine);
expected[RING_TAIL_IDX] = ce->ring->tail;
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
goto err_rq;
}
cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_rq;
}
for (n = 0; n < MAX_IDX; n++) {
if (cs[n] != expected[n]) {
pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n",
engine->name, n, cs[n], expected[n]);
err = -EINVAL;
break;
}
}
i915_gem_object_unpin_map(scratch->obj);
err_rq:
i915_request_put(rq);
err_unpin:
intel_context_unpin(ce);
err_put:
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
intel_context_put(ce);
return err;
}
static int live_lrc_state(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
/*
* Check the live register state matches what we expect for this
* intel_context.
*/
scratch = create_scratch(gt);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
err = __live_lrc_state(engine, scratch);
if (err)
break;
}
if (igt_flush_test(gt->i915))
err = -EIO;
i915_vma_unpin_and_release(&scratch, 0);
return err;
}
static int gpr_make_dirty(struct intel_context *ce)
{
struct i915_request *rq;
u32 *cs;
int n;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
if (IS_ERR(cs)) {
i915_request_add(rq);
return PTR_ERR(cs);
}
*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
for (n = 0; n < NUM_GPR_DW; n++) {
*cs++ = CS_GPR(ce->engine, n);
*cs++ = STACK_MAGIC;
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_add(rq);
return 0;
}
static struct i915_request *
__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
{
const u32 offset =
i915_ggtt_offset(ce->engine->status_page.vma) +
offset_in_page(slot);
struct i915_request *rq;
u32 *cs;
int err;
int n;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
if (IS_ERR(cs)) {
i915_request_add(rq);
return ERR_CAST(cs);
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_NOOP;
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_NEQ_SDD;
*cs++ = 0;
*cs++ = offset;
*cs++ = 0;
for (n = 0; n < NUM_GPR_DW; n++) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = CS_GPR(ce->engine, n);
*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
*cs++ = 0;
}
err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
i915_request_get(rq);
i915_request_add(rq);
if (err) {
i915_request_put(rq);
rq = ERR_PTR(err);
}
return rq;
}
static int __live_lrc_gpr(struct intel_engine_cs *engine,
struct i915_vma *scratch,
bool preempt)
{
u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
struct intel_context *ce;
struct i915_request *rq;
u32 *cs;
int err;
int n;
if (GRAPHICS_VER(engine->i915) < 9 && engine->class != RENDER_CLASS)
return 0; /* GPR only on rcs0 for gen8 */
err = gpr_make_dirty(engine->kernel_context);
if (err)
return err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
rq = __gpr_read(ce, scratch, slot);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_put;
}
err = wait_for_submit(engine, rq, HZ / 2);
if (err)
goto err_rq;
if (preempt) {
err = gpr_make_dirty(engine->kernel_context);
if (err)
goto err_rq;
err = emit_semaphore_signal(engine->kernel_context, slot);
if (err)
goto err_rq;
err = wait_for_submit(engine, rq, HZ / 2);
if (err)
goto err_rq;
} else {
slot[0] = 1;
wmb();
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
goto err_rq;
}
cs = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_rq;
}
for (n = 0; n < NUM_GPR_DW; n++) {
if (cs[n]) {
pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n",
engine->name,
n / 2, n & 1 ? "udw" : "ldw",
cs[n]);
err = -EINVAL;
break;
}
}
i915_gem_object_unpin_map(scratch->obj);
err_rq:
memset32(&slot[0], -1, 4);
wmb();
i915_request_put(rq);
err_put:
intel_context_put(ce);
return err;
}
static int live_lrc_gpr(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
/*
* Check that GPR registers are cleared in new contexts as we need
* to avoid leaking any information from previous contexts.
*/
scratch = create_scratch(gt);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
st_engine_heartbeat_disable(engine);
err = __live_lrc_gpr(engine, scratch, false);
if (err)
goto err;
err = __live_lrc_gpr(engine, scratch, true);
if (err)
goto err;
err:
st_engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
break;
}
i915_vma_unpin_and_release(&scratch, 0);
return err;
}
static struct i915_request *
create_timestamp(struct intel_context *ce, void *slot, int idx)
{
const u32 offset =
i915_ggtt_offset(ce->engine->status_page.vma) +
offset_in_page(slot);
struct i915_request *rq;
u32 *cs;
int err;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
cs = intel_ring_begin(rq, 10);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_NOOP;
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_NEQ_SDD;
*cs++ = 0;
*cs++ = offset;
*cs++ = 0;
*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
*cs++ = offset + idx * sizeof(u32);
*cs++ = 0;
intel_ring_advance(rq, cs);
err = 0;
err:
i915_request_get(rq);
i915_request_add(rq);
if (err) {
i915_request_put(rq);
return ERR_PTR(err);
}
return rq;
}
struct lrc_timestamp {
struct intel_engine_cs *engine;
struct intel_context *ce[2];
u32 poison;
};
static bool timestamp_advanced(u32 start, u32 end)
{
return (s32)(end - start) > 0;
}
static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
{
u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
struct i915_request *rq;
u32 timestamp;
int err = 0;
arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
rq = create_timestamp(arg->ce[0], slot, 1);
if (IS_ERR(rq))
return PTR_ERR(rq);
err = wait_for_submit(rq->engine, rq, HZ / 2);
if (err)
goto err;
if (preempt) {
arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
err = emit_semaphore_signal(arg->ce[1], slot);
if (err)
goto err;
} else {
slot[0] = 1;
wmb();
}
/* And wait for switch to kernel (to save our context to memory) */
err = context_flush(arg->ce[0], HZ / 2);
if (err)
goto err;
if (!timestamp_advanced(arg->poison, slot[1])) {
pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
arg->engine->name, preempt ? "preempt" : "simple",
arg->poison, slot[1]);
err = -EINVAL;
}
timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
if (!timestamp_advanced(slot[1], timestamp)) {
pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
arg->engine->name, preempt ? "preempt" : "simple",
slot[1], timestamp);
err = -EINVAL;
}
err:
memset32(slot, -1, 4);
i915_request_put(rq);
return err;
}
static int live_lrc_timestamp(void *arg)
{
struct lrc_timestamp data = {};
struct intel_gt *gt = arg;
enum intel_engine_id id;
const u32 poison[] = {
0,
S32_MAX,
(u32)S32_MAX + 1,
U32_MAX,
};
/*
* We want to verify that the timestamp is saved and restored across
* context switches and is monotonic.
*
* So we do this with a little bit of LRC poisoning to check various
* boundary conditions, and see what happens if we preempt the context
* with a second request (carrying more poison into the timestamp).
*/
for_each_engine(data.engine, gt, id) {
int i, err = 0;
st_engine_heartbeat_disable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
struct intel_context *tmp;
tmp = intel_context_create(data.engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto err;
}
err = intel_context_pin(tmp);
if (err) {
intel_context_put(tmp);
goto err;
}
data.ce[i] = tmp;
}
for (i = 0; i < ARRAY_SIZE(poison); i++) {
data.poison = poison[i];
err = __lrc_timestamp(&data, false);
if (err)
break;
err = __lrc_timestamp(&data, true);
if (err)
break;
}
err:
st_engine_heartbeat_enable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
if (!data.ce[i])
break;
intel_context_unpin(data.ce[i]);
intel_context_put(data.ce[i]);
}
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
return err;
}
return 0;
}
static struct i915_vma *
create_user_vma(struct i915_address_space *vm, unsigned long size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
obj = i915_gem_object_create_internal(vm->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
}
return vma;
}
static u32 safe_poison(u32 offset, u32 poison)
{
/*
* Do not enable predication as it will nop all subsequent commands,
* not only disabling the tests (by preventing all the other SRM) but
* also preventing the arbitration events at the end of the request.
*/
if (offset == i915_mmio_reg_offset(RING_PREDICATE_RESULT(0)))
poison &= ~REG_BIT(0);
return poison;
}
static struct i915_vma *
store_context(struct intel_context *ce, struct i915_vma *scratch)
{
struct i915_vma *batch;
u32 dw, x, *cs, *hw;
u32 *defaults;
batch = create_user_vma(ce->vm, SZ_64K);
if (IS_ERR(batch))
return batch;
cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
}
defaults = shmem_pin_map(ce->engine->default_state);
if (!defaults) {
i915_gem_object_unpin_map(batch->obj);
i915_vma_put(batch);
return ERR_PTR(-ENOMEM);
}
x = 0;
dw = 0;
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & LRI_LENGTH_MASK;
/*
* Keep it simple, skip parsing complex commands
*
* At present, there are no more MI_LOAD_REGISTER_IMM
* commands after the first 3D state command. Rather
* than include a table (see i915_cmd_parser.c) of all
* the possible commands and their instruction lengths
* (or mask for variable length instructions), assume
* we have gathered the complete list of registers and
* bail out.
*/
if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
break;
if (hw[dw] == 0) {
dw++;
continue;
}
if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
/* Assume all other MI commands match LRI length mask */
dw += len + 2;
continue;
}
if (!len) {
pr_err("%s: invalid LRI found in context image\n",
ce->engine->name);
igt_hexdump(defaults, PAGE_SIZE);
break;
}
dw++;
len = (len + 1) / 2;
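/* len is now the number of registers (offset/value pairs) described by this LRI */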
while (len--) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = hw[dw];
*cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
*cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
dw += 2;
x += 4;
}
} while (dw < PAGE_SIZE / sizeof(u32) &&
(hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
*cs++ = MI_BATCH_BUFFER_END;
shmem_unpin_map(ce->engine->default_state, defaults);
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
return batch;
}
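/*
 * Submit a request on @ce that dumps the context registers into @before,
 * then waits on @sema (with arbitration enabled so another context may be
 * scheduled in between), and finally dumps the registers again into @after.
 * The register dumps themselves run with arbitration disabled.
 */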
static struct i915_request *
record_registers(struct intel_context *ce,
struct i915_vma *before,
struct i915_vma *after,
u32 *sema)
{
struct i915_vma *b_before, *b_after;
struct i915_request *rq;
u32 *cs;
int err;
b_before = store_context(ce, before);
if (IS_ERR(b_before))
return ERR_CAST(b_before);
b_after = store_context(ce, after);
if (IS_ERR(b_after)) {
rq = ERR_CAST(b_after);
goto err_before;
}
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
goto err_after;
err = igt_vma_move_to_active_unlocked(before, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_rq;
err = igt_vma_move_to_active_unlocked(b_before, rq, 0);
if (err)
goto err_rq;
err = igt_vma_move_to_active_unlocked(after, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_rq;
err = igt_vma_move_to_active_unlocked(b_after, rq, 0);
if (err)
goto err_rq;
cs = intel_ring_begin(rq, 14);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_rq;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
*cs++ = lower_32_bits(i915_vma_offset(b_before));
*cs++ = upper_32_bits(i915_vma_offset(b_before));
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_NEQ_SDD;
*cs++ = 0;
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
offset_in_page(sema);
*cs++ = 0;
*cs++ = MI_NOOP;
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
*cs++ = lower_32_bits(i915_vma_offset(b_after));
*cs++ = upper_32_bits(i915_vma_offset(b_after));
intel_ring_advance(rq, cs);
WRITE_ONCE(*sema, 0);
i915_request_get(rq);
i915_request_add(rq);
err_after:
i915_vma_put(b_after);
err_before:
i915_vma_put(b_before);
return rq;
err_rq:
i915_request_add(rq);
rq = ERR_PTR(err);
goto err_after;
}
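/*
 * Build a user batch that replays the MI_LOAD_REGISTER_IMM lists from the
 * default context image, writing @poison into every listed register
 * (filtered through safe_poison() for registers that would derail the test).
 */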
static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
{
struct i915_vma *batch;
u32 dw, *cs, *hw;
u32 *defaults;
batch = create_user_vma(ce->vm, SZ_64K);
if (IS_ERR(batch))
return batch;
cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
if (IS_ERR(cs)) {
i915_vma_put(batch);
return ERR_CAST(cs);
}
defaults = shmem_pin_map(ce->engine->default_state);
if (!defaults) {
i915_gem_object_unpin_map(batch->obj);
i915_vma_put(batch);
return ERR_PTR(-ENOMEM);
}
dw = 0;
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & LRI_LENGTH_MASK;
/* For simplicity, break parsing at the first complex command */
if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
break;
if (hw[dw] == 0) {
dw++;
continue;
}
if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
if (!len) {
pr_err("%s: invalid LRI found in context image\n",
ce->engine->name);
igt_hexdump(defaults, PAGE_SIZE);
break;
}
dw++;
len = (len + 1) / 2;
*cs++ = MI_LOAD_REGISTER_IMM(len);
while (len--) {
*cs++ = hw[dw];
*cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine,
MI_LRI_LRM_CS_MMIO),
poison);
dw += 2;
}
} while (dw < PAGE_SIZE / sizeof(u32) &&
(hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
*cs++ = MI_BATCH_BUFFER_END;
shmem_unpin_map(ce->engine->default_state, defaults);
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
return batch;
}
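/*
 * Run the poisoning batch built by load_context() on @ce at barrier priority
 * and then signal @sema from the ring so the recording context can proceed
 * to its second register dump.
 */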
static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
{
struct i915_request *rq;
struct i915_vma *batch;
u32 *cs;
int err;
batch = load_context(ce, poison);
if (IS_ERR(batch))
return PTR_ERR(batch);
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}
err = igt_vma_move_to_active_unlocked(batch, rq, 0);
if (err)
goto err_rq;
cs = intel_ring_begin(rq, 8);
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
goto err_rq;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
*cs++ = lower_32_bits(i915_vma_offset(batch));
*cs++ = upper_32_bits(i915_vma_offset(batch));
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
offset_in_page(sema);
*cs++ = 0;
*cs++ = 1;
intel_ring_advance(rq, cs);
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
err_rq:
i915_request_add(rq);
err_batch:
i915_vma_put(batch);
return err;
}
static bool is_moving(u32 a, u32 b)
{
return a != b;
}
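/*
 * Compare the reference register dumps (taken with no interference) against
 * the result dumps (taken while a second context wrote poison). A register
 * that was stable in the reference but differs in the result has leaked
 * state across contexts; RING_HEAD/RING_TAIL are expected to change and are
 * skipped.
 */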
static int compare_isolation(struct intel_engine_cs *engine,
struct i915_vma *ref[2],
struct i915_vma *result[2],
struct intel_context *ce,
u32 poison)
{
u32 x, dw, *hw, *lrc;
u32 *A[2], *B[2];
u32 *defaults;
int err = 0;
A[0] = i915_gem_object_pin_map_unlocked(ref[0]->obj, I915_MAP_WC);
if (IS_ERR(A[0]))
return PTR_ERR(A[0]);
A[1] = i915_gem_object_pin_map_unlocked(ref[1]->obj, I915_MAP_WC);
if (IS_ERR(A[1])) {
err = PTR_ERR(A[1]);
goto err_A0;
}
B[0] = i915_gem_object_pin_map_unlocked(result[0]->obj, I915_MAP_WC);
if (IS_ERR(B[0])) {
err = PTR_ERR(B[0]);
goto err_A1;
}
B[1] = i915_gem_object_pin_map_unlocked(result[1]->obj, I915_MAP_WC);
if (IS_ERR(B[1])) {
err = PTR_ERR(B[1]);
goto err_B0;
}
lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
intel_gt_coherent_map_type(engine->gt,
ce->state->obj,
false));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
}
lrc += LRC_STATE_OFFSET / sizeof(*hw);
defaults = shmem_pin_map(ce->engine->default_state);
if (!defaults) {
err = -ENOMEM;
goto err_lrc;
}
x = 0;
dw = 0;
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
u32 len = hw[dw] & LRI_LENGTH_MASK;
/* For simplicity, break parsing at the first complex command */
if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
break;
if (hw[dw] == 0) {
dw++;
continue;
}
if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
if (!len) {
pr_err("%s: invalid LRI found in context image\n",
engine->name);
igt_hexdump(defaults, PAGE_SIZE);
break;
}
dw++;
len = (len + 1) / 2;
while (len--) {
if (!is_moving(A[0][x], A[1][x]) &&
(A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
switch (hw[dw] & 4095) {
case 0x30: /* RING_HEAD */
case 0x34: /* RING_TAIL */
break;
default:
pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
engine->name, dw,
hw[dw], hw[dw + 1],
A[0][x], B[0][x], B[1][x],
poison, lrc[dw + 1]);
err = -EINVAL;
}
}
dw += 2;
x++;
}
} while (dw < PAGE_SIZE / sizeof(u32) &&
(hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
shmem_unpin_map(ce->engine->default_state, defaults);
err_lrc:
i915_gem_object_unpin_map(ce->state->obj);
err_B1:
i915_gem_object_unpin_map(result[1]->obj);
err_B0:
i915_gem_object_unpin_map(result[0]->obj);
err_A1:
i915_gem_object_unpin_map(ref[1]->obj);
err_A0:
i915_gem_object_unpin_map(ref[0]->obj);
return err;
}
static struct i915_vma *
create_result_vma(struct i915_address_space *vm, unsigned long sz)
{
struct i915_vma *vma;
void *ptr;
vma = create_user_vma(vm, sz);
if (IS_ERR(vma))
return vma;
/* Set the results to a known value distinct from the poison */
ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(ptr)) {
i915_vma_put(vma);
return ERR_CAST(ptr);
}
memset(ptr, POISON_INUSE, vma->size);
i915_gem_object_flush_map(vma->obj);
i915_gem_object_unpin_map(vma->obj);
return vma;
}
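/*
 * Record the registers of context A, let context B write poison while A is
 * blocked on the semaphore, then record A's registers again and verify the
 * poison did not leak into A's context image.
 */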
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
struct i915_vma *ref[2], *result[2];
struct intel_context *A, *B;
struct i915_request *rq;
int err;
A = intel_context_create(engine);
if (IS_ERR(A))
return PTR_ERR(A);
B = intel_context_create(engine);
if (IS_ERR(B)) {
err = PTR_ERR(B);
goto err_A;
}
ref[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[0])) {
err = PTR_ERR(ref[0]);
goto err_B;
}
ref[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[1])) {
err = PTR_ERR(ref[1]);
goto err_ref0;
}
rq = record_registers(A, ref[0], ref[1], sema);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ref1;
}
WRITE_ONCE(*sema, 1);
wmb();
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
i915_request_put(rq);
err = -ETIME;
goto err_ref1;
}
i915_request_put(rq);
result[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[0])) {
err = PTR_ERR(result[0]);
goto err_ref1;
}
result[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[1])) {
err = PTR_ERR(result[1]);
goto err_result0;
}
rq = record_registers(A, result[0], result[1], sema);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_result1;
}
err = poison_registers(B, poison, sema);
if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("%s(%s): wait for results timed out\n",
__func__, engine->name);
err = -ETIME;
}
/* Always cancel the semaphore wait, just in case the GPU gets stuck */
WRITE_ONCE(*sema, -1);
i915_request_put(rq);
if (err)
goto err_result1;
err = compare_isolation(engine, ref, result, A, poison);
err_result1:
i915_vma_put(result[1]);
err_result0:
i915_vma_put(result[0]);
err_ref1:
i915_vma_put(ref[1]);
err_ref0:
i915_vma_put(ref[0]);
err_B:
intel_context_put(B);
err_A:
intel_context_put(A);
return err;
}
static bool skip_isolation(const struct intel_engine_cs *engine)
{
if (engine->class == COPY_ENGINE_CLASS && GRAPHICS_VER(engine->i915) == 9)
return true;
if (engine->class == RENDER_CLASS && GRAPHICS_VER(engine->i915) == 11)
return true;
return false;
}
static int live_lrc_isolation(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
const u32 poison[] = {
STACK_MAGIC,
0x3a3a3a3a,
0x5c5c5c5c,
0xffffffff,
0xffff0000,
};
int err = 0;
/*
* Our goal is to try and verify that per-context state cannot be
* tampered with by another non-privileged client.
*
* We take the list of context registers from the LRI in the default
* context image and attempt to modify that list from a remote context.
*/
for_each_engine(engine, gt, id) {
int i;
/* Just don't even ask */
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
skip_isolation(engine))
continue;
intel_engine_pm_get(engine);
for (i = 0; i < ARRAY_SIZE(poison); i++) {
int result;
result = __lrc_isolation(engine, poison[i]);
if (result && !err)
err = result;
result = __lrc_isolation(engine, ~poison[i]);
if (result && !err)
err = result;
}
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915)) {
err = -EIO;
break;
}
}
return err;
}
static int indirect_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
i915_request_get(rq);
i915_request_add(rq);
if (i915_request_wait(rq, 0, HZ / 5) < 0)
err = -ETIME;
i915_request_put(rq);
return err;
}
#define CTX_BB_CANARY_OFFSET (3 * 1024)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
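/*
 * Emit an SRM into the per-context indirect wa batch that copies RING_START
 * into a canary slot within the context image, proving the batch really ran
 * as part of the context restore.
 */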
static u32 *
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
MI_LRI_LRM_CS_MMIO;
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
CTX_BB_CANARY_OFFSET;
*cs++ = 0;
return cs;
}
static void
indirect_ctx_bb_setup(struct intel_context *ce)
{
u32 *cs = context_indirect_bb(ce);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}
static bool check_ring_start(struct intel_context *ce)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
LRC_STATE_OFFSET + context_wa_bb_offset(ce);
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;
pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n",
ctx_bb[CTX_BB_CANARY_INDEX],
ce->lrc_reg_state[CTX_RING_START]);
return false;
}
static int indirect_ctx_bb_check(struct intel_context *ce)
{
int err;
err = indirect_ctx_submit_req(ce);
if (err)
return err;
if (!check_ring_start(ce))
return -EINVAL;
return 0;
}
static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
{
struct intel_context *a, *b;
int err;
a = intel_context_create(engine);
if (IS_ERR(a))
return PTR_ERR(a);
err = intel_context_pin(a);
if (err)
goto put_a;
b = intel_context_create(engine);
if (IS_ERR(b)) {
err = PTR_ERR(b);
goto unpin_a;
}
err = intel_context_pin(b);
if (err)
goto put_b;
/* We use the already reserved extra page in context state */
if (!a->wa_bb_page) {
GEM_BUG_ON(b->wa_bb_page);
GEM_BUG_ON(GRAPHICS_VER(engine->i915) == 12);
goto unpin_b;
}
/*
* In order to test that our per-context bb is truly per context and
* executes at the intended point in the context restore process,
* make the batch store the ring start value to memory.
* As ring start is restored prior to running the indirect ctx bb and
* will be different for each context, it fits this purpose.
*/
indirect_ctx_bb_setup(a);
indirect_ctx_bb_setup(b);
err = indirect_ctx_bb_check(a);
if (err)
goto unpin_b;
err = indirect_ctx_bb_check(b);
unpin_b:
intel_context_unpin(b);
put_b:
intel_context_put(b);
unpin_a:
intel_context_unpin(a);
put_a:
intel_context_put(a);
return err;
}
static int live_lrc_indirect_ctx_bb(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
err = __live_lrc_indirect_ctx_bb(engine);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
break;
}
return err;
}
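/*
 * Manually reset the engine while the corrupted context is executing,
 * bypassing the usual hangcheck/heartbeat detection.
 */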
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
local_bh_disable();
if (!test_and_set_bit(bit, lock)) {
tasklet_disable(&engine->sched_engine->tasklet);
if (!rq->fence.error)
__intel_engine_reset_bh(engine, NULL);
tasklet_enable(&engine->sched_engine->tasklet);
clear_and_wake_up_bit(bit, lock);
}
local_bh_enable();
}
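/*
 * Scribble random bytes over the pinned context image and submit a request
 * on it; executing the corrupted state is expected to fail and be reset.
 */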
static struct i915_request *garbage(struct intel_context *ce,
struct rnd_state *prng)
{
struct i915_request *rq;
int err;
err = intel_context_pin(ce);
if (err)
return ERR_PTR(err);
prandom_bytes_state(prng,
ce->lrc_reg_state,
ce->engine->context_size -
LRC_STATE_OFFSET);
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_unpin;
}
i915_request_get(rq);
i915_request_add(rq);
return rq;
err_unpin:
intel_context_unpin(ce);
return ERR_PTR(err);
}
static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
{
struct intel_context *ce;
struct i915_request *hang;
int err = 0;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
hang = garbage(ce, prng);
if (IS_ERR(hang)) {
err = PTR_ERR(hang);
goto err_ce;
}
if (wait_for_submit(engine, hang, HZ / 2)) {
i915_request_put(hang);
err = -ETIME;
goto err_ce;
}
intel_context_set_banned(ce);
garbage_reset(engine, hang);
intel_engine_flush_submission(engine);
if (!hang->fence.error) {
i915_request_put(hang);
pr_err("%s: corrupted context was not reset\n",
engine->name);
err = -EINVAL;
goto err_ce;
}
if (i915_request_wait(hang, 0, HZ / 2) < 0) {
pr_err("%s: corrupted context did not recover\n",
engine->name);
i915_request_put(hang);
err = -EIO;
goto err_ce;
}
i915_request_put(hang);
err_ce:
intel_context_put(ce);
return err;
}
static int live_lrc_garbage(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* Verify that we can recover if one context state is completely
* corrupted.
*/
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
for_each_engine(engine, gt, id) {
I915_RND_STATE(prng);
int err = 0, i;
if (!intel_has_reset_engine(engine->gt))
continue;
intel_engine_pm_get(engine);
for (i = 0; i < 3; i++) {
err = __lrc_garbage(engine, &prng);
if (err)
break;
}
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
return err;
}
return 0;
}
static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
{
struct intel_context *ce;
struct i915_request *rq;
IGT_TIMEOUT(end_time);
int err;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
ce->stats.runtime.num_underflow = 0;
ce->stats.runtime.max_underflow = 0;
do {
unsigned int loop = 1024;
while (loop) {
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_rq;
}
if (--loop == 0)
i915_request_get(rq);
i915_request_add(rq);
}
if (__igt_timeout(end_time, NULL))
break;
i915_request_put(rq);
} while (1);
err = i915_request_wait(rq, 0, HZ / 5);
if (err < 0) {
pr_err("%s: request not completed!\n", engine->name);
goto err_wait;
}
igt_flush_test(engine->i915);
pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
engine->name,
intel_context_get_total_runtime_ns(ce),
intel_context_get_avg_runtime_ns(ce));
err = 0;
if (ce->stats.runtime.num_underflow) {
pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
engine->name,
ce->stats.runtime.num_underflow,
ce->stats.runtime.max_underflow);
GEM_TRACE_DUMP();
err = -EOVERFLOW;
}
err_wait:
i915_request_put(rq);
err_rq:
intel_context_put(ce);
return err;
}
static int live_pphwsp_runtime(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/*
* Check that cumulative context runtime as stored in the pphwsp[16]
* is monotonic.
*/
for_each_engine(engine, gt, id) {
err = __live_pphwsp_runtime(engine);
if (err)
break;
}
if (igt_flush_test(gt->i915))
err = -EIO;
return err;
}
int intel_lrc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_lrc_layout),
SUBTEST(live_lrc_fixed),
SUBTEST(live_lrc_state),
SUBTEST(live_lrc_gpr),
SUBTEST(live_lrc_isolation),
SUBTEST(live_lrc_timestamp),
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
return intel_gt_live_subtests(tests, to_gt(i915));
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_lrc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
*/
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "i915_gem_evict.h"
#include "intel_gt.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "selftest_engine_heartbeat.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
#include "selftests/mock_drm.h"
#include "gem/selftests/mock_context.h"
#include "gem/selftests/igt_gem_utils.h"
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
struct hang {
struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
u32 *seqno;
u32 *batch;
};
static int hang_init(struct hang *h, struct intel_gt *gt)
{
void *vaddr;
int err;
memset(h, 0, sizeof(*h));
h->gt = gt;
h->ctx = kernel_context(gt->i915, NULL);
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
goto err_ctx;
}
h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->obj)) {
err = PTR_ERR(h->obj);
goto err_hws;
}
i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map_unlocked(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_obj;
}
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map_unlocked(h->obj,
intel_gt_coherent_map_type(gt, h->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
}
h->batch = vaddr;
return 0;
err_unpin_hws:
i915_gem_object_unpin_map(h->hws);
err_obj:
i915_gem_object_put(h->obj);
err_hws:
i915_gem_object_put(h->hws);
err_ctx:
kernel_context_close(h->ctx);
return err;
}
static u64 hws_address(const struct i915_vma *hws,
const struct i915_request *rq)
{
return i915_vma_offset(hws) +
offset_in_page(sizeof(u32) * rq->fence.context);
}
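/*
 * Build a request whose batch stores its seqno to the HWS page and then
 * loops back to itself with MI_BATCH_BUFFER_START, producing a reliable
 * GPU hang until the batch is rewritten to MI_BATCH_BUFFER_END.
 */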
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct intel_gt *gt = h->gt;
struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx);
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
void *vaddr;
u32 *batch;
int err;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
i915_vm_put(vm);
return ERR_CAST(obj);
}
vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
return ERR_CAST(vaddr);
}
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
h->obj = obj;
h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
if (IS_ERR(vma)) {
i915_vm_put(vm);
return ERR_CAST(vma);
}
hws = i915_vma_instance(h->hws, vm, NULL);
if (IS_ERR(hws)) {
i915_vm_put(vm);
return ERR_CAST(hws);
}
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err) {
i915_vm_put(vm);
return ERR_PTR(err);
}
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
goto unpin_vma;
rq = igt_request_alloc(h->ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
}
err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (err)
goto cancel_rq;
err = igt_vma_move_to_active_unlocked(hws, rq, 0);
if (err)
goto cancel_rq;
batch = h->batch;
if (GRAPHICS_VER(gt->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(i915_vma_offset(vma));
*batch++ = upper_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(i915_vma_offset(vma));
} else {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
*batch++ = MI_NOOP;
memset(batch, 0, 1024);
batch += 1024 / sizeof(*batch);
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(i915_vma_offset(vma));
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
intel_gt_chipset_flush(engine->gt);
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
if (err)
goto cancel_rq;
}
flags = 0;
if (GRAPHICS_VER(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
cancel_rq:
if (err) {
i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
i915_vm_put(vm);
return err ? ERR_PTR(err) : rq;
}
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
{
return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
}
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(h->gt);
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
i915_gem_object_unpin_map(h->hws);
i915_gem_object_put(h->hws);
kernel_context_close(h->ctx);
igt_flush_test(h->gt->i915);
}
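/*
 * Poll the seqno the hanging batch writes to the HWS page to confirm the
 * request has actually started executing (a short busy-wait, then a longer
 * sleeping wait).
 */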
static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
rq->fence.seqno),
10) &&
wait_for(i915_seqno_passed(hws_seqno(h, rq),
rq->fence.seqno),
1000));
}
static int igt_hang_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
struct i915_request *rq;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
int err;
/* Basic check that we can execute our hanging batch */
err = hang_init(&h, gt);
if (err)
return err;
for_each_engine(engine, gt, id) {
struct intel_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
continue;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("Failed to create request for %s, err=%d\n",
engine->name, err);
goto fini;
}
i915_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
i915_request_add(rq);
timeout = 0;
intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
if (intel_gt_is_wedged(gt))
timeout = -EIO;
i915_request_put(rq);
if (timeout < 0) {
err = timeout;
pr_err("Wait for request failed on %s, err=%d\n",
engine->name, err);
goto fini;
}
}
fini:
hang_fini(&h);
return err;
}
static bool wait_for_idle(struct intel_engine_cs *engine)
{
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}
static int igt_reset_nop(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine;
unsigned int reset_count, count;
enum intel_engine_id id;
IGT_TIMEOUT(end_time);
int err = 0;
/* Check that we can reset during non-user portions of requests */
reset_count = i915_reset_count(global);
count = 0;
do {
for_each_engine(engine, gt, id) {
struct intel_context *ce;
int i;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
pr_err("[%s] Create context failed: %d!\n", engine->name, err);
break;
}
for (i = 0; i < 16; i++) {
struct i915_request *rq;
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create request failed: %d!\n",
engine->name, err);
break;
}
i915_request_add(rq);
}
intel_context_put(ce);
}
igt_global_reset_lock(gt);
intel_gt_reset(gt, ALL_ENGINES, NULL);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt)) {
pr_err("[%s] GT is wedged!\n", engine->name);
err = -EIO;
break;
}
if (i915_reset_count(global) != reset_count + ++count) {
pr_err("[%s] Reset not recorded: %d vs %d + %d!\n",
engine->name, i915_reset_count(global), reset_count, count);
err = -EINVAL;
break;
}
err = igt_flush_test(gt->i915);
if (err) {
pr_err("[%s] Flush failed: %d!\n", engine->name, err);
break;
}
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
if (igt_flush_test(gt->i915)) {
pr_err("Post flush failed: %d!\n", err);
err = -EIO;
}
return err;
}
static int igt_reset_nop_engine(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Check that we can engine-reset during non-user portions */
if (!intel_has_reset_engine(gt))
return 0;
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count, count;
struct intel_context *ce;
IGT_TIMEOUT(end_time);
int err;
if (intel_engine_uses_guc(engine)) {
/*
* Engine-level resets are triggered by the GuC when a hang is
* detected; they can no longer be triggered by the KMD. Thus a
* nop batch cannot be used as a reset test.
*/
continue;
}
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
return PTR_ERR(ce);
}
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
st_engine_heartbeat_disable(engine);
GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
>->reset.flags));
do {
int i;
if (!wait_for_idle(engine)) {
pr_err("%s failed to idle before reset\n",
engine->name);
err = -EIO;
break;
}
for (i = 0; i < 16; i++) {
struct i915_request *rq;
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
intel_engine_dump(engine, &p,
"%s(%s): failed to submit request\n",
__func__,
engine->name);
GEM_TRACE("%s(%s): failed to submit request\n",
__func__,
engine->name);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = PTR_ERR(rq);
break;
}
i915_request_add(rq);
}
err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("intel_engine_reset(%s) failed, err:%d\n",
engine->name, err);
break;
}
if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
if (i915_reset_engine_count(global, engine) !=
reset_engine_count + ++count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
err = -EINVAL;
break;
}
} while (time_before(jiffies, end_time));
clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags);
st_engine_heartbeat_enable(engine);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
intel_context_put(ce);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
return err;
}
return 0;
}
static void force_reset_timeout(struct intel_engine_cs *engine)
{
engine->reset_timeout.probability = 999;
atomic_set(&engine->reset_timeout.times, -1);
}
static void cancel_reset_timeout(struct intel_engine_cs *engine)
{
memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
}
static int igt_reset_fail_engine(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Check that we can recover from engine-reset failures */
if (!intel_has_reset_engine(gt))
return 0;
for_each_engine(engine, gt, id) {
unsigned int count;
struct intel_context *ce;
IGT_TIMEOUT(end_time);
int err;
/* Can't manually break the reset if i915 doesn't perform it */
if (intel_engine_uses_guc(engine))
continue;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
return PTR_ERR(ce);
}
st_engine_heartbeat_disable(engine);
GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
>->reset.flags));
force_reset_timeout(engine);
err = intel_engine_reset(engine, NULL);
cancel_reset_timeout(engine);
if (err == 0) /* timeouts only generated on gen8+ */
goto skip;
count = 0;
do {
struct i915_request *last = NULL;
int i;
if (!wait_for_idle(engine)) {
pr_err("%s failed to idle before reset\n",
engine->name);
err = -EIO;
break;
}
for (i = 0; i < count % 15; i++) {
struct i915_request *rq;
rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
intel_engine_dump(engine, &p,
"%s(%s): failed to submit request\n",
__func__,
engine->name);
GEM_TRACE("%s(%s): failed to submit request\n",
__func__,
engine->name);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
if (last)
i915_request_put(last);
err = PTR_ERR(rq);
goto out;
}
if (last)
i915_request_put(last);
last = i915_request_get(rq);
i915_request_add(rq);
}
if (count & 1) {
err = intel_engine_reset(engine, NULL);
if (err) {
GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
engine->name, err);
GEM_TRACE_DUMP();
i915_request_put(last);
break;
}
} else {
force_reset_timeout(engine);
err = intel_engine_reset(engine, NULL);
cancel_reset_timeout(engine);
if (err != -ETIMEDOUT) {
pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
engine->name, err);
i915_request_put(last);
break;
}
}
err = 0;
if (last) {
if (i915_request_wait(last, 0, HZ / 2) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
intel_engine_dump(engine, &p,
"%s(%s): failed to complete request\n",
__func__,
engine->name);
GEM_TRACE("%s(%s): failed to complete request\n",
__func__,
engine->name);
GEM_TRACE_DUMP();
err = -EIO;
}
i915_request_put(last);
}
count++;
} while (err == 0 && time_before(jiffies, end_time));
out:
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
skip:
clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags);
st_engine_heartbeat_enable(engine);
intel_context_put(ce);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
return err;
}
return 0;
}
static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
int err = 0;
/* Check that we can issue an engine reset on an idle engine (no-op) */
if (!intel_has_reset_engine(gt))
return 0;
if (active) {
err = hang_init(&h, gt);
if (err)
return err;
}
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
unsigned long count;
bool using_guc = intel_engine_uses_guc(engine);
IGT_TIMEOUT(end_time);
if (using_guc && !active)
continue;
if (active && !intel_engine_can_store_dword(engine))
continue;
if (!wait_for_idle(engine)) {
pr_err("%s failed to idle before reset\n",
engine->name);
err = -EIO;
break;
}
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
st_engine_heartbeat_disable(engine);
GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
>->reset.flags));
count = 0;
do {
struct i915_request *rq = NULL;
struct intel_selftest_saved_policy saved;
int err2;
err = intel_selftest_modify_policy(engine, &saved,
SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
if (err) {
pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
break;
}
if (active) {
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create hang request failed: %d!\n",
engine->name, err);
goto restore;
}
i915_request_get(rq);
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
err = -EIO;
goto restore;
}
}
if (!using_guc) {
err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("intel_engine_reset(%s) failed, err:%d\n",
engine->name, err);
goto skip;
}
}
if (rq) {
/* Ensure the reset happens and kills the engine */
err = intel_selftest_wait_for_rq(rq);
if (err)
pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
engine->name, rq->fence.context,
rq->fence.seqno, rq->context->guc_id.id, err);
}
skip:
if (rq)
i915_request_put(rq);
if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
goto restore;
}
/* GuC based resets are not logged per engine */
if (!using_guc) {
if (i915_reset_engine_count(global, engine) !=
++reset_engine_count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
err = -EINVAL;
goto restore;
}
}
count++;
restore:
err2 = intel_selftest_restore_policy(engine, &saved);
if (err2)
pr_err("[%s] Restore policy failed: %d!\n", engine->name, err);
if (err == 0)
err = err2;
if (err)
break;
} while (time_before(jiffies, end_time));
clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags);
st_engine_heartbeat_enable(engine);
pr_info("%s: Completed %lu %s resets\n",
engine->name, count, active ? "active" : "idle");
if (err)
break;
err = igt_flush_test(gt->i915);
if (err) {
pr_err("[%s] Flush failed: %d!\n", engine->name, err);
break;
}
}
if (intel_gt_is_wedged(gt)) {
pr_err("GT is wedged!\n");
err = -EIO;
}
if (active)
hang_fini(&h);
return err;
}
static int igt_reset_idle_engine(void *arg)
{
return __igt_reset_engine(arg, false);
}
static int igt_reset_active_engine(void *arg)
{
return __igt_reset_engine(arg, true);
}
struct active_engine {
struct kthread_worker *worker;
struct kthread_work work;
struct intel_engine_cs *engine;
unsigned long resets;
unsigned int flags;
bool stop;
int result;
};
#define TEST_ACTIVE BIT(0)
#define TEST_OTHERS BIT(1)
#define TEST_SELF BIT(2)
#define TEST_PRIORITY BIT(3)
static int active_request_put(struct i915_request *rq)
{
int err = 0;
if (!rq)
return 0;
if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
rq->engine->name,
rq->fence.context,
rq->fence.seqno);
GEM_TRACE_DUMP();
intel_gt_set_wedged(rq->engine->gt);
err = -EIO;
}
i915_request_put(rq);
return err;
}
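/*
 * kthread worker body: keep a small rolling window of requests in flight on
 * one engine (optionally with randomised priorities) until told to stop,
 * checking that each request completes despite resets elsewhere.
 */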
static void active_engine(struct kthread_work *work)
{
I915_RND_STATE(prng);
struct active_engine *arg = container_of(work, typeof(*arg), work);
struct intel_engine_cs *engine = arg->engine;
struct i915_request *rq[8] = {};
struct intel_context *ce[ARRAY_SIZE(rq)];
unsigned long count;
int err = 0;
for (count = 0; count < ARRAY_SIZE(ce); count++) {
ce[count] = intel_context_create(engine);
if (IS_ERR(ce[count])) {
arg->result = PTR_ERR(ce[count]);
pr_err("[%s] Create context #%ld failed: %d!\n",
engine->name, count, arg->result);
while (count--)
intel_context_put(ce[count]);
return;
}
}
count = 0;
while (!READ_ONCE(arg->stop)) {
unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
struct i915_request *old = rq[idx];
struct i915_request *new;
new = intel_context_create_request(ce[idx]);
if (IS_ERR(new)) {
err = PTR_ERR(new);
pr_err("[%s] Create request #%d failed: %d!\n", engine->name, idx, err);
break;
}
rq[idx] = i915_request_get(new);
i915_request_add(new);
if (engine->sched_engine->schedule && arg->flags & TEST_PRIORITY) {
struct i915_sched_attr attr = {
.priority =
i915_prandom_u32_max_state(512, &prng),
};
engine->sched_engine->schedule(rq[idx], &attr);
}
err = active_request_put(old);
if (err) {
pr_err("[%s] Request put failed: %d!\n", engine->name, err);
break;
}
cond_resched();
}
for (count = 0; count < ARRAY_SIZE(rq); count++) {
int err__ = active_request_put(rq[count]);
if (err__)
pr_err("[%s] Request put #%ld failed: %d!\n", engine->name, count, err__);
/* Keep the first error */
if (!err)
err = err__;
intel_context_put(ce[count]);
}
arg->result = err;
}
static int __igt_reset_engines(struct intel_gt *gt,
const char *test_name,
unsigned int flags)
{
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine, *other;
struct active_engine *threads;
enum intel_engine_id id, tmp;
struct hang h;
int err = 0;
/* Check that issuing a reset on one engine does not interfere
* with any other engine.
*/
if (!intel_has_reset_engine(gt))
return 0;
if (flags & TEST_ACTIVE) {
err = hang_init(&h, gt);
if (err)
return err;
if (flags & TEST_PRIORITY)
h.ctx->sched.priority = 1024;
}
threads = kmalloc_array(I915_NUM_ENGINES, sizeof(*threads), GFP_KERNEL);
if (!threads)
return -ENOMEM;
for_each_engine(engine, gt, id) {
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
bool using_guc = intel_engine_uses_guc(engine);
IGT_TIMEOUT(end_time);
if (flags & TEST_ACTIVE) {
if (!intel_engine_can_store_dword(engine))
continue;
} else if (using_guc)
continue;
if (!wait_for_idle(engine)) {
pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n",
engine->name, test_name);
err = -EIO;
break;
}
memset(threads, 0, sizeof(*threads) * I915_NUM_ENGINES);
for_each_engine(other, gt, tmp) {
struct kthread_worker *worker;
threads[tmp].resets =
i915_reset_engine_count(global, other);
if (other == engine && !(flags & TEST_SELF))
continue;
if (other != engine && !(flags & TEST_OTHERS))
continue;
threads[tmp].engine = other;
threads[tmp].flags = flags;
worker = kthread_create_worker(0, "igt/%s",
other->name);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
pr_err("[%s] Worker create failed: %d!\n",
engine->name, err);
goto unwind;
}
threads[tmp].worker = worker;
kthread_init_work(&threads[tmp].work, active_engine);
kthread_queue_work(threads[tmp].worker,
&threads[tmp].work);
}
st_engine_heartbeat_disable_no_pm(engine);
GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
>->reset.flags));
do {
struct i915_request *rq = NULL;
struct intel_selftest_saved_policy saved;
int err2;
err = intel_selftest_modify_policy(engine, &saved,
SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
if (err) {
pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
break;
}
if (flags & TEST_ACTIVE) {
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create hang request failed: %d!\n",
engine->name, err);
goto restore;
}
i915_request_get(rq);
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
err = -EIO;
goto restore;
}
} else {
intel_engine_pm_get(engine);
}
if (!using_guc) {
err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
engine->name, test_name, err);
goto restore;
}
}
if (rq) {
/* Ensure the reset happens and kills the engine */
err = intel_selftest_wait_for_rq(rq);
if (err)
pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
engine->name, rq->fence.context,
rq->fence.seqno, rq->context->guc_id.id, err);
}
count++;
if (rq) {
if (rq->fence.error != -EIO) {
pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
engine->name, test_name,
rq->fence.context,
rq->fence.seqno, rq->context->guc_id.id);
i915_request_put(rq);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
goto restore;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to complete request %llx:%lld after reset\n",
engine->name, test_name,
rq->fence.context,
rq->fence.seqno);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
goto restore;
}
i915_request_put(rq);
}
if (!(flags & TEST_ACTIVE))
intel_engine_pm_put(engine);
if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to idle after reset\n",
engine->name, test_name);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
err = -EIO;
goto restore;
}
restore:
err2 = intel_selftest_restore_policy(engine, &saved);
if (err2)
pr_err("[%s] Restore policy failed: %d!\n", engine->name, err2);
if (err == 0)
err = err2;
if (err)
break;
} while (time_before(jiffies, end_time));
clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags);
st_engine_heartbeat_enable_no_pm(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
/* GuC based resets are not logged per engine */
if (!using_guc) {
reported = i915_reset_engine_count(global, engine);
reported -= threads[engine->id].resets;
if (reported != count) {
pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
engine->name, test_name, count, reported);
if (!err)
err = -EINVAL;
}
}
unwind:
for_each_engine(other, gt, tmp) {
int ret;
if (!threads[tmp].worker)
continue;
WRITE_ONCE(threads[tmp].stop, true);
kthread_flush_work(&threads[tmp].work);
ret = READ_ONCE(threads[tmp].result);
if (ret) {
pr_err("kthread for other engine %s failed, err=%d\n",
other->name, ret);
if (!err)
err = ret;
}
kthread_destroy_worker(threads[tmp].worker);
/* GuC based resets are not logged per engine */
if (!using_guc) {
if (other->uabi_class != engine->uabi_class &&
threads[tmp].resets !=
i915_reset_engine_count(global, other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
other->name,
i915_reset_engine_count(global, other) -
threads[tmp].resets);
if (!err)
err = -EINVAL;
}
}
}
if (device != i915_reset_count(global)) {
pr_err("Global reset (count=%ld)!\n",
i915_reset_count(global) - device);
if (!err)
err = -EINVAL;
}
if (err)
break;
err = igt_flush_test(gt->i915);
if (err) {
pr_err("[%s] Flush failed: %d!\n", engine->name, err);
break;
}
}
kfree(threads);
if (intel_gt_is_wedged(gt))
err = -EIO;
if (flags & TEST_ACTIVE)
hang_fini(&h);
return err;
}
static int igt_reset_engines(void *arg)
{
static const struct {
const char *name;
unsigned int flags;
} phases[] = {
{ "idle", 0 },
{ "active", TEST_ACTIVE },
{ "others-idle", TEST_OTHERS },
{ "others-active", TEST_OTHERS | TEST_ACTIVE },
{
"others-priority",
TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY
},
{
"self-priority",
TEST_ACTIVE | TEST_PRIORITY | TEST_SELF,
},
{ }
};
struct intel_gt *gt = arg;
typeof(*phases) *p;
int err;
for (p = phases; p->name; p++) {
if (p->flags & TEST_PRIORITY) {
if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
continue;
}
err = __igt_reset_engines(arg, p->name, p->flags);
if (err)
return err;
}
return 0;
}
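/*
 * Trigger a GT reset directly, as if hangcheck had fired, and return the
 * global reset count sampled beforehand so callers can check it advanced.
 */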
static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
u32 count = i915_reset_count(>->i915->gpu_error);
intel_gt_reset(gt, mask, NULL);
return count;
}
static int igt_reset_wait(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine;
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
int err;
engine = intel_selftest_find_any_engine(gt);
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
igt_global_reset_lock(gt);
err = hang_init(&h, gt);
if (err) {
pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
goto unlock;
}
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
goto fini;
}
i915_request_get(rq);
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
intel_gt_set_wedged(gt);
err = -EIO;
goto out_rq;
}
reset_count = fake_hangcheck(gt, ALL_ENGINES);
timeout = i915_request_wait(rq, 0, 10);
if (timeout < 0) {
pr_err("i915_request_wait failed on a stuck request: err=%ld\n",
timeout);
err = timeout;
goto out_rq;
}
if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
goto out_rq;
}
out_rq:
i915_request_put(rq);
fini:
hang_fini(&h);
unlock:
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
return -EIO;
return err;
}
struct evict_vma {
struct completion completion;
struct i915_vma *vma;
};
static int evict_vma(void *data)
{
struct evict_vma *arg = data;
struct i915_address_space *vm = arg->vma->vm;
struct drm_mm_node evict = arg->vma->node;
int err;
complete(&arg->completion);
mutex_lock(&vm->mutex);
err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
mutex_unlock(&vm->mutex);
return err;
}
static int evict_fence(void *data)
{
struct evict_vma *arg = data;
int err;
complete(&arg->completion);
/* Mark the fence register as dirty to force the mmio update. */
err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512);
if (err) {
pr_err("Invalid Y-tiling settings; err:%d\n", err);
return err;
}
err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
if (err) {
pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err);
return err;
}
err = i915_vma_pin_fence(arg->vma);
i915_vma_unpin(arg->vma);
if (err) {
pr_err("Unable to pin Y-tiled fence; err:%d\n", err);
return err;
}
i915_vma_unpin_fence(arg->vma);
return 0;
}
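/*
 * Pin a vma into a hanging request and, from a separate kthread, try to
 * evict (or re-fence) that vma; the unbind must wait on the hung request
 * until a reset releases it.
 */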
static int __igt_reset_evict_vma(struct intel_gt *gt,
struct i915_address_space *vm,
int (*fn)(void *),
unsigned int flags)
{
struct intel_engine_cs *engine;
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
struct evict_vma arg;
struct hang h;
unsigned int pin_flags;
int err;
if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE)
return 0;
engine = intel_selftest_find_any_engine(gt);
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
err = hang_init(&h, gt);
if (err) {
pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
return err;
}
obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("[%s] Create object failed: %d!\n", engine->name, err);
goto fini;
}
if (flags & EXEC_OBJECT_NEEDS_FENCE) {
err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512);
if (err) {
pr_err("Invalid X-tiling settings; err:%d\n", err);
goto out_obj;
}
}
arg.vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(arg.vma)) {
err = PTR_ERR(arg.vma);
pr_err("[%s] VMA instance failed: %d!\n", engine->name, err);
goto out_obj;
}
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
goto out_obj;
}
pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
pin_flags |= PIN_MAPPABLE;
err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
if (err) {
i915_request_add(rq);
pr_err("[%s] VMA pin failed: %d!\n", engine->name, err);
goto out_obj;
}
if (flags & EXEC_OBJECT_NEEDS_FENCE) {
err = i915_vma_pin_fence(arg.vma);
if (err) {
pr_err("Unable to pin X-tiled fence; err:%d\n", err);
i915_vma_unpin(arg.vma);
i915_request_add(rq);
goto out_obj;
}
}
err = igt_vma_move_to_active_unlocked(arg.vma, rq, flags);
if (err)
pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
if (flags & EXEC_OBJECT_NEEDS_FENCE)
i915_vma_unpin_fence(arg.vma);
i915_vma_unpin(arg.vma);
i915_request_get(rq);
i915_request_add(rq);
if (err)
goto out_rq;
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
intel_gt_set_wedged(gt);
goto out_reset;
}
init_completion(&arg.completion);
tsk = kthread_run(fn, &arg, "igt/evict_vma");
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
tsk = NULL;
goto out_reset;
}
get_task_struct(tsk);
wait_for_completion(&arg.completion);
if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("igt/evict_vma kthread did not wait\n");
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
intel_gt_set_wedged(gt);
goto out_reset;
}
out_reset:
igt_global_reset_lock(gt);
fake_hangcheck(gt, rq->engine->mask);
igt_global_reset_unlock(gt);
if (tsk) {
struct intel_wedge_me w;
/* The reset, even indirectly, should take less than 10ms. */
intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
err = kthread_stop(tsk);
put_task_struct(tsk);
}
out_rq:
i915_request_put(rq);
out_obj:
i915_gem_object_put(obj);
fini:
hang_fini(&h);
if (intel_gt_is_wedged(gt))
return -EIO;
return err;
}
static int igt_reset_evict_ggtt(void *arg)
{
struct intel_gt *gt = arg;
return __igt_reset_evict_vma(gt, >->ggtt->vm,
evict_vma, EXEC_OBJECT_WRITE);
}
static int igt_reset_evict_ppgtt(void *arg)
{
struct intel_gt *gt = arg;
struct i915_ppgtt *ppgtt;
int err;
/* aliasing == global gtt locking, covered above */
if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
return 0;
ppgtt = i915_ppgtt_create(gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
err = __igt_reset_evict_vma(gt, &ppgtt->vm,
evict_vma, EXEC_OBJECT_WRITE);
i915_vm_put(&ppgtt->vm);
return err;
}
static int igt_reset_evict_fence(void *arg)
{
struct intel_gt *gt = arg;
return __igt_reset_evict_vma(gt, >->ggtt->vm,
evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}
static int wait_for_others(struct intel_gt *gt,
struct intel_engine_cs *exclude)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
if (engine == exclude)
continue;
if (!wait_for_idle(engine))
return -EIO;
}
return 0;
}
static int igt_reset_queue(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
int err;
/* Check that we replay pending requests following a hang */
igt_global_reset_lock(gt);
err = hang_init(&h, gt);
if (err)
goto unlock;
for_each_engine(engine, gt, id) {
struct intel_selftest_saved_policy saved;
struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
bool using_guc = intel_engine_uses_guc(engine);
if (!intel_engine_can_store_dword(engine))
continue;
if (using_guc) {
err = intel_selftest_modify_policy(engine, &saved,
SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK);
if (err) {
pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
goto fini;
}
}
prev = hang_create_request(&h, engine);
if (IS_ERR(prev)) {
err = PTR_ERR(prev);
pr_err("[%s] Create 'prev' hang request failed: %d!\n", engine->name, err);
goto restore;
}
i915_request_get(prev);
i915_request_add(prev);
count = 0;
do {
struct i915_request *rq;
unsigned int reset_count;
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
goto restore;
}
i915_request_get(rq);
i915_request_add(rq);
/*
* XXX We don't handle resetting the kernel context
* very well. If we trigger a device reset twice in
* quick succession while the kernel context is
* executing, we may end up skipping the breadcrumb.
* This is really only a problem for the selftest as
* normally there is a large interlude between resets
* (hangcheck), or we focus on resetting just one
* engine and so avoid repeatedly resetting innocents.
*/
err = wait_for_others(gt, engine);
if (err) {
pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
__func__, engine->name);
i915_request_put(rq);
i915_request_put(prev);
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
goto restore;
}
if (!wait_until_running(&h, prev)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
prev->fence.seqno, hws_seqno(&h, prev));
intel_engine_dump(engine, &p,
"%s\n", engine->name);
i915_request_put(rq);
i915_request_put(prev);
intel_gt_set_wedged(gt);
err = -EIO;
goto restore;
}
reset_count = fake_hangcheck(gt, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
prev->fence.error);
i915_request_put(rq);
i915_request_put(prev);
err = -EINVAL;
goto restore;
}
if (rq->fence.error) {
pr_err("Fence error status not zero [%d] after unrelated reset\n",
rq->fence.error);
i915_request_put(rq);
i915_request_put(prev);
err = -EINVAL;
goto restore;
}
if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
i915_request_put(rq);
i915_request_put(prev);
err = -EINVAL;
goto restore;
}
i915_request_put(prev);
prev = rq;
count++;
} while (time_before(jiffies, end_time));
pr_info("%s: Completed %d queued resets\n",
engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(engine->gt);
i915_request_put(prev);
restore:
if (using_guc) {
int err2 = intel_selftest_restore_policy(engine, &saved);
if (err2)
pr_err("%s:%d> [%s] Restore policy failed: %d!\n",
__func__, __LINE__, engine->name, err2);
if (err == 0)
err = err2;
}
if (err)
goto fini;
err = igt_flush_test(gt->i915);
if (err) {
pr_err("[%s] Flush failed: %d!\n", engine->name, err);
break;
}
}
fini:
hang_fini(&h);
unlock:
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
return -EIO;
return err;
}
static int igt_handle_error(void *arg)
{
struct intel_gt *gt = arg;
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine;
struct hang h;
struct i915_request *rq;
struct i915_gpu_coredump *error;
int err;
engine = intel_selftest_find_any_engine(gt);
/* Check that we can issue a global GPU and engine reset */
if (!intel_has_reset_engine(gt))
return 0;
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
err = hang_init(&h, gt);
if (err) {
pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
return err;
}
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
goto err_fini;
}
i915_request_get(rq);
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_request;
}
/* Temporarily disable error capture */
error = xchg(&global->first_error, (void *)-1);
intel_gt_handle_error(gt, engine->mask, 0, NULL);
xchg(&global->first_error, error);
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
err = -EINVAL;
goto err_request;
}
err_request:
i915_request_put(rq);
err_fini:
hang_fini(&h);
return err;
}
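/*
 * Perform an engine reset from within the given atomic section (hardirq,
 * softirq or preemption disabled) to check the reset path never sleeps.
 */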
static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
const struct igt_atomic_section *p,
const char *mode)
{
struct tasklet_struct * const t = &engine->sched_engine->tasklet;
int err;
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
engine->name, mode, p->name);
if (t->func)
tasklet_disable(t);
if (strcmp(p->name, "softirq"))
local_bh_disable();
p->critical_section_begin();
err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
if (strcmp(p->name, "softirq"))
local_bh_enable();
if (t->func) {
tasklet_enable(t);
tasklet_hi_schedule(t);
}
if (err)
pr_err("i915_reset_engine(%s:%s) failed under %s\n",
engine->name, mode, p->name);
return err;
}
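/*
 * Exercise the atomic engine reset first while the engine is idle and then
 * again while a hanging request is running, finally waiting for that request
 * to complete (or the GT to be wedged) to confirm the reset took effect.
 */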
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
const struct igt_atomic_section *p)
{
struct i915_request *rq;
struct hang h;
int err;
err = __igt_atomic_reset_engine(engine, p, "idle");
if (err)
return err;
err = hang_init(&h, engine->gt);
if (err) {
pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
return err;
}
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
goto out;
}
i915_request_get(rq);
i915_request_add(rq);
if (wait_until_running(&h, rq)) {
err = __igt_atomic_reset_engine(engine, p, "active");
} else {
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
rq->fence.seqno, hws_seqno(&h, rq));
intel_gt_set_wedged(engine->gt);
err = -EIO;
}
if (err == 0) {
struct intel_wedge_me w;
intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
if (intel_gt_is_wedged(engine->gt))
err = -EIO;
}
i915_request_put(rq);
out:
hang_fini(&h);
return err;
}
static int igt_reset_engines_atomic(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the engines resets are usable from atomic context */
if (!intel_has_reset_engine(gt))
return 0;
if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
err = igt_atomic_reset_engine(engine, p);
if (err)
goto out;
}
}
out:
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
unlock:
igt_global_reset_unlock(gt);
return err;
}
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_reset_nop),
SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_fail_engine),
SUBTEST(igt_reset_engines),
SUBTEST(igt_reset_engines_atomic),
SUBTEST(igt_reset_queue),
SUBTEST(igt_reset_wait),
SUBTEST(igt_reset_evict_ggtt),
SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
};
struct intel_gt *gt = to_gt(i915);
intel_wakeref_t wakeref;
int err;
if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = intel_gt_live_subtests(tests, gt);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return err;
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_hangcheck.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
#include "intel_renderstate.h"
static const u32 gen6_null_state_relocs[] = {
0x00000020,
0x00000024,
0x0000002c,
0x000001e0,
0x000001e4,
-1,
};
static const u32 gen6_null_state_batch[] = {
0x69040000,
0x790d0001,
0x00000000,
0x00000000,
0x78180000,
0x00000001,
0x61010008,
0x00000000,
0x00000001, /* reloc */
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001,
0x61020000,
0x00000000,
0x78050001,
0x00000018,
0x00000000,
0x780d1002,
0x00000000,
0x00000000,
0x00000420,
0x78150003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78160003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78170003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79050005,
0xe0040000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79100000,
0x00000000,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x780e0002,
0x00000441,
0x00000401,
0x00000401,
0x78021002,
0x00000000,
0x00000000,
0x00000400,
0x78130012,
0x00400810,
0x00000000,
0x20000000,
0x04000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78140007,
0x00000280,
0x08080000,
0x00000000,
0x00060000,
0x4e080002,
0x00100400,
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11330000,
0x02850004,
0x11220000,
0x78011002,
0x00000000,
0x00000000,
0x00000200,
0x78080003,
0x00002000,
0x00000448, /* reloc */
0x00000448, /* reloc */
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000220, /* state start */
0x00000240,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0060005a,
0x204077be,
0x000000c0,
0x008d0040,
0x0060005a,
0x206077be,
0x000000c0,
0x008d0080,
0x0060005a,
0x208077be,
0x000000d0,
0x008d0040,
0x0060005a,
0x20a077be,
0x000000d0,
0x008d0080,
0x00000201,
0x20080061,
0x00000000,
0x00000000,
0x00600001,
0x20200022,
0x008d0000,
0x00000000,
0x02800031,
0x21c01cc9,
0x00000020,
0x0a8a0001,
0x00600001,
0x204003be,
0x008d01c0,
0x00000000,
0x00600001,
0x206003be,
0x008d01e0,
0x00000000,
0x00600001,
0x208003be,
0x008d0200,
0x00000000,
0x00600001,
0x20a003be,
0x008d0220,
0x00000000,
0x00600001,
0x20c003be,
0x008d0240,
0x00000000,
0x00600001,
0x20e003be,
0x008d0260,
0x00000000,
0x00600001,
0x210003be,
0x008d0280,
0x00000000,
0x00600001,
0x212003be,
0x008d02a0,
0x00000000,
0x05800031,
0x24001cc8,
0x00000040,
0x90019000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x30000000,
0x00000124,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x80000031,
0x00000003,
0x00000000, /* state end */
};
RO_RENDERSTATE(6);
| linux-master | drivers/gpu/drm/i915/gt/gen6_renderstate.c |
// SPDX-License-Identifier: MIT
/*
* Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
*/
#include <linux/irq.h>
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
#define GSC_BAR_LENGTH 0x00000FFC
static void gsc_irq_mask(struct irq_data *d)
{
/* generic irq handling */
}
static void gsc_irq_unmask(struct irq_data *d)
{
/* generic irq handling */
}
static struct irq_chip gsc_irq_chip = {
.name = "gsc_irq_chip",
.irq_mask = gsc_irq_mask,
.irq_unmask = gsc_irq_unmask,
};
static int gsc_irq_init(int irq)
{
irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
handle_simple_irq, "gsc_irq_handler");
return irq_set_chip_data(irq, NULL);
}
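/*
 * Allocate contiguous, CPU-cleared local memory to back the GSC extended
 * operational memory and keep its pages pinned for the lifetime of the
 * interface.
 */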
static int
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
{
struct intel_gt *gt = gsc_to_gt(gsc);
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_create_lmem(gt->i915, size,
I915_BO_ALLOC_CONTIGUOUS |
I915_BO_ALLOC_CPU_CLEAR);
if (IS_ERR(obj)) {
drm_err(>->i915->drm, "Failed to allocate gsc memory\n");
return PTR_ERR(obj);
}
err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
drm_err(>->i915->drm, "Failed to pin pages for gsc memory\n");
goto out_put;
}
intf->gem_obj = obj;
return 0;
out_put:
i915_gem_object_put(obj);
return err;
}
static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
if (!obj)
return;
if (i915_gem_object_has_pinned_pages(obj))
i915_gem_object_unpin_pages(obj);
i915_gem_object_put(obj);
}
struct gsc_def {
const char *name;
unsigned long bar;
size_t bar_size;
bool use_polling;
bool slow_firmware;
size_t lmem_size;
};
/* gsc resources and definitions (HECI1 and HECI2) */
static const struct gsc_def gsc_def_dg1[] = {
{
/* HECI1 not yet implemented. */
},
{
.name = "mei-gscfi",
.bar = DG1_GSC_HECI2_BASE,
.bar_size = GSC_BAR_LENGTH,
}
};
static const struct gsc_def gsc_def_xehpsdv[] = {
{
/* HECI1 not enabled on the device. */
},
{
.name = "mei-gscfi",
.bar = DG1_GSC_HECI2_BASE,
.bar_size = GSC_BAR_LENGTH,
.use_polling = true,
.slow_firmware = true,
}
};
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
.bar = DG2_GSC_HECI1_BASE,
.bar_size = GSC_BAR_LENGTH,
.lmem_size = SZ_4M,
},
{
.name = "mei-gscfi",
.bar = DG2_GSC_HECI2_BASE,
.bar_size = GSC_BAR_LENGTH,
}
};
static void gsc_release_dev(struct device *dev)
{
struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
kfree(adev);
}
static void gsc_destroy_one(struct drm_i915_private *i915,
struct intel_gsc *gsc, unsigned int intf_id)
{
struct intel_gsc_intf *intf = &gsc->intf[intf_id];
if (intf->adev) {
struct auxiliary_device *aux_dev = &intf->adev->aux_dev;
if (intf_id == 0)
intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
aux_dev->dev.bus);
auxiliary_device_delete(aux_dev);
auxiliary_device_uninit(aux_dev);
intf->adev = NULL;
}
if (intf->irq >= 0)
irq_free_desc(intf->irq);
intf->irq = -1;
gsc_ext_om_destroy(intf);
}
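/*
 * Set up one HECI interface: pick the per-platform definition, allocate an
 * irq descriptor (unless the interface is polled), optionally reserve lmem
 * for the extended operational memory, and register a mei auxiliary device
 * for the mei driver to bind to. Nothing is created on remote GT tiles.
 */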
static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
unsigned int intf_id)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct mei_aux_device *adev;
struct auxiliary_device *aux_dev;
const struct gsc_def *def;
struct intel_gsc_intf *intf = &gsc->intf[intf_id];
int ret;
intf->irq = -1;
intf->id = intf_id;
/*
 * On multi-tile setups the GSC is functional only on the first tile.
 */
if (gsc_to_gt(gsc)->info.id != 0) {
drm_dbg(&i915->drm, "Not initializing gsc for remote tiles\n");
return;
}
if (intf_id == 0 && !HAS_HECI_PXP(i915))
return;
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
} else if (IS_XEHPSDV(i915)) {
def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
drm_warn_once(&i915->drm, "Unknown platform\n");
return;
}
if (!def->name) {
drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
return;
}
/* skip irq initialization */
if (def->use_polling)
goto add_device;
intf->irq = irq_alloc_desc(0);
if (intf->irq < 0) {
drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
goto fail;
}
ret = gsc_irq_init(intf->irq);
if (ret < 0) {
drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
goto fail;
}
add_device:
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
goto fail;
if (def->lmem_size) {
drm_dbg(&i915->drm, "setting up GSC lmem\n");
if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
kfree(adev);
goto fail;
}
adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
}
adev->irq = intf->irq;
adev->bar.parent = &pdev->resource[0];
adev->bar.start = def->bar + pdev->resource[0].start;
adev->bar.end = adev->bar.start + def->bar_size - 1;
adev->bar.flags = IORESOURCE_MEM;
adev->bar.desc = IORES_DESC_NONE;
adev->slow_firmware = def->slow_firmware;
aux_dev = &adev->aux_dev;
aux_dev->name = def->name;
aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
PCI_DEVID(pdev->bus->number, pdev->devfn);
aux_dev->dev.parent = &pdev->dev;
aux_dev->dev.release = gsc_release_dev;
ret = auxiliary_device_init(aux_dev);
if (ret < 0) {
drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
kfree(adev);
goto fail;
}
intf->adev = adev; /* needed by the notifier */
if (intf_id == 0)
intel_huc_register_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
aux_dev->dev.bus);
ret = auxiliary_device_add(aux_dev);
if (ret < 0) {
drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
if (intf_id == 0)
intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
aux_dev->dev.bus);
intf->adev = NULL;
/* adev will be freed with the put_device() and .release sequence */
auxiliary_device_uninit(aux_dev);
goto fail;
}
return;
fail:
gsc_destroy_one(i915, gsc, intf->id);
}
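/* Forward a GSC interrupt for the given interface to its irq descriptor. */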
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
{
int ret;
if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
drm_warn_once(>->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
return;
}
if (!HAS_HECI_GSC(gt->i915)) {
drm_warn_once(>->i915->drm, "GSC irq: not supported");
return;
}
if (gt->gsc.intf[intf_id].irq < 0)
return;
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
if (ret)
drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
}
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
{
if (iir & GSC_IRQ_INTF(0))
gsc_irq_handler(gt, 0);
if (iir & GSC_IRQ_INTF(1))
gsc_irq_handler(gt, 1);
}
void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
{
unsigned int i;
if (!HAS_HECI_GSC(i915))
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
gsc_init_one(i915, gsc, i);
}
void intel_gsc_fini(struct intel_gsc *gsc)
{
struct intel_gt *gt = gsc_to_gt(gsc);
unsigned int i;
if (!HAS_HECI_GSC(gt->i915))
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
gsc_destroy_one(gt->i915, gsc, i);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gsc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/slab.h> /* fault-inject.h is not standalone! */
#include <linux/fault-inject.h>
#include <linux/sched/mm.h>
#include <drm/drm_cache.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
return IS_BROXTON(i915) && i915_vtd_active(i915);
}
bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915)
{
return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
/*
* To avoid severe over-allocation when dealing with min_page_size
* restrictions, we override that behaviour here by allowing an object
* size and page layout which can be smaller. In practice this should be
* totally fine, since GTT paging structures are not typically inserted
* into the GTT.
*
* Note that we also hit this path for the scratch page, and for this
* case it might need to be 64K, but that should work fine here since we
* used the passed in size for the page size, which should ensure it
* also has the same alignment.
*/
obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
vm->lmem_pt_obj_flags);
/*
* Ensure all paging structures for this vm share the same dma-resv
* object underneath, with the idea that one object_lock() will lock
* them all at once.
*/
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
}
return obj;
}
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
struct drm_i915_gem_object *obj;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
obj = i915_gem_object_create_internal(vm->i915, sz);
/*
* Ensure all paging structures for this vm share the same dma-resv
* object underneath, with the idea that one object_lock() will lock
* them all at once.
*/
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
}
return obj;
}
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
enum i915_map_type type;
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
enum i915_map_type type;
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
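/*
 * Unbind and tear down every vma on the given vm list. Objects already being
 * destroyed only have their vma unbound here; final vma destruction is left
 * to the object destructor, with the vm kept alive via an extra resv
 * reference until then.
 */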
static void clear_vm_list(struct list_head *list)
{
struct i915_vma *vma, *vn;
list_for_each_entry_safe(vma, vn, list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
if (!i915_gem_object_get_rcu(obj)) {
/*
* Object is dying, but has not yet cleared its
* vma list.
* Unbind the dying vma to ensure our list
* is completely drained. We leave the destruction to
* the object destructor to avoid the vma
* disappearing under it.
*/
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
/* Remove from the unbound list */
list_del_init(&vma->vm_link);
/*
* Delay the vm and vm mutex freeing until the
* object is done with destruction.
*/
i915_vm_resv_get(vma->vm);
vma->vm_ddestroy = true;
} else {
i915_vma_destroy_locked(vma);
i915_gem_object_put(obj);
}
}
}
static void __i915_vm_close(struct i915_address_space *vm)
{
mutex_lock(&vm->mutex);
clear_vm_list(&vm->bound_list);
clear_vm_list(&vm->unbound_list);
/* Check for must-fix unanticipated side-effects */
GEM_BUG_ON(!list_empty(&vm->bound_list));
GEM_BUG_ON(!list_empty(&vm->unbound_list));
mutex_unlock(&vm->mutex);
}
/* lock the vm into the current ww, if we lock one, we lock all */
int i915_vm_lock_objects(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww)
{
if (vm->scratch[0]->base.resv == &vm->_resv) {
return i915_gem_object_lock(vm->scratch[0], ww);
} else {
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
/* We borrowed the scratch page from ggtt, take the top level object */
return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
}
}
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
}
/**
* i915_vm_resv_release - Final struct i915_address_space destructor
* @kref: Pointer to the &i915_address_space.resv_ref member.
*
* This function is called when the last lock sharer no longer shares the
* &i915_address_space._resv lock, and also if we raced when
* destroying a vma by the vma destruction
*/
void i915_vm_resv_release(struct kref *kref)
{
struct i915_address_space *vm =
container_of(kref, typeof(*vm), resv_ref);
dma_resv_fini(&vm->_resv);
mutex_destroy(&vm->mutex);
kfree(vm);
}
static void __i915_vm_release(struct work_struct *work)
{
struct i915_address_space *vm =
container_of(work, struct i915_address_space, release_work);
__i915_vm_close(vm);
/* Synchronize async unbinds. */
i915_vma_resource_bind_dep_sync_all(vm);
vm->cleanup(vm);
i915_address_space_fini(vm);
i915_vm_resv_put(vm);
}
void i915_vm_release(struct kref *kref)
{
struct i915_address_space *vm =
container_of(kref, struct i915_address_space, ref);
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
queue_work(vm->i915->wq, &vm->release_work);
}
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
/*
* Special case for GGTT that has already done an early
* kref_init here.
*/
if (!kref_read(&vm->resv_ref))
kref_init(&vm->resv_ref);
vm->pending_unbind = RB_ROOT_CACHED;
INIT_WORK(&vm->release_work, __i915_vm_release);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
* Do a dummy acquire now under fs_reclaim so that any allocation
* attempt holding the lock is immediately reported by lockdep.
*/
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
} else {
/*
* CHV + BXT VTD workaround use stop_machine(),
* which is allowed to allocate memory. This means &vm->mutex
* is the outer lock, and in theory we can allocate memory inside
* it through stop_machine().
*
* Add the annotation for this, we use trylock in shrinker.
*/
mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
might_alloc(GFP_KERNEL);
mutex_release(&vm->mutex.dep_map, _THIS_IP_);
}
dma_resv_init(&vm->_resv);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
ARRAY_SIZE(vm->min_alignment));
if (HAS_64K_PAGES(vm->i915)) {
vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
}
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
INIT_LIST_HEAD(&vm->bound_list);
INIT_LIST_HEAD(&vm->unbound_list);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
{
enum i915_map_type type;
GEM_BUG_ON(!i915_gem_object_has_pages(p));
return page_unpack_bits(p->mm.mapping, &type);
}
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
return sg_dma_address(p->mm.pages->sgl);
}
struct page *__px_page(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
return sg_page(p->mm.pages->sgl);
}
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
void *vaddr = __px_vaddr(p);
memset64(vaddr, val, count);
drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
void *vaddr = __px_vaddr(scratch);
u8 val;
val = 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
val = POISON_FREE;
memset(vaddr, val, scratch->base.size);
drm_clflush_virt_range(vaddr, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
{
unsigned long size;
/*
* In order to utilize 64K pages for an object with a size < 2M, we will
* need to support a 64K scratch page, given that every 16th entry for a
* page-table operating in 64K mode must point to a properly aligned 64K
* region, including any PTEs which happen to point to scratch.
*
* This is only relevant for the 48b PPGTT where we support
* huge-gtt-pages, see also i915_vma_insert(). However, as we share the
* scratch (read-only) between all vm, we create one 64k scratch page
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_4lvl(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K) &&
!HAS_64K_PAGES(vm->i915))
size = I915_GTT_PAGE_SIZE_64K;
do {
struct drm_i915_gem_object *obj;
obj = vm->alloc_scratch_dma(vm, size);
if (IS_ERR(obj))
goto skip;
if (map_pt_dma(vm, obj))
goto skip_obj;
/* We need a single contiguous page for our scratch */
if (obj->mm.page_sizes.sg < size)
goto skip_obj;
/* And it needs to be correspondingly aligned */
if (__px_dma(obj) & (size - 1))
goto skip_obj;
/*
* Use a non-zero scratch page for debugging.
*
* We want a value that should be reasonably obvious
* to spot in the error state, while also causing a GPU hang
* if executed. We prefer using a clear page in production, so
* should it ever be accidentally used, the effect should be
* fairly benign.
*/
poison_scratch_page(obj);
vm->scratch[0] = obj;
vm->scratch_order = get_order(size);
return 0;
skip_obj:
i915_gem_object_put(obj);
skip:
if (size == I915_GTT_PAGE_SIZE_4K)
return -ENOMEM;
size = I915_GTT_PAGE_SIZE_4K;
} while (1);
}
void free_scratch(struct i915_address_space *vm)
{
int i;
if (!vm->scratch[0])
return;
for (i = 0; i <= vm->top; i++)
i915_gem_object_put(vm->scratch[i]);
}
void gtt_write_workarounds(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
/*
* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
if (IS_BROADWELL(i915))
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(i915))
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_GEN9_LP(i915))
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
else if (GRAPHICS_VER(i915) >= 9 && GRAPHICS_VER(i915) <= 11)
intel_uncore_write(uncore,
GEN8_L3_LRA_1_GPGPU,
GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
/*
* To support 64K PTEs we need to first enable the use of the
* Intermediate-Page-Size(IPS) bit of the PDE field via some magical
* mmio, otherwise the page-walker will simply ignore the IPS bit. This
* shouldn't be needed after GEN10.
*
* 64K pages were first introduced from BDW+, although technically they
* only *work* from gen9+. For pre-BDW we instead have the option for
* 32K pages, but we don't currently have any support for it in our
* driver.
*/
if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
GRAPHICS_VER(i915) <= 10)
intel_uncore_rmw(uncore,
GEN8_GAMW_ECO_DEV_RW_IA,
0,
GAMW_ECO_ENABLE_64K_IPS_FIELD);
if (IS_GRAPHICS_VER(i915, 8, 11)) {
bool can_use_gtt_cache = true;
/*
* According to the BSpec if we use 2M/1G pages then we also
* need to disable the GTT cache. At least on BDW we can see
* visual corruption when using 2M pages, and not disabling the
* GTT cache.
*/
if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
can_use_gtt_cache = false;
/* WaGttCachingOffByDefault */
intel_uncore_write(uncore,
HSW_GTT_CACHE_EN,
can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
gt_WARN_ON_ONCE(gt, can_use_gtt_cache &&
intel_uncore_read(uncore,
HSW_GTT_CACHE_EN) == 0);
}
}
static void xelpmp_setup_private_ppat(struct intel_uncore *uncore)
{
intel_uncore_write(uncore, XELPMP_PAT_INDEX(0),
MTL_PPAT_L4_0_WB);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(1),
MTL_PPAT_L4_1_WT);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(2),
MTL_PPAT_L4_3_UC);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(3),
MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
intel_uncore_write(uncore, XELPMP_PAT_INDEX(4),
MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
/*
* Remaining PAT entries are left at the hardware-default
* fully-cached setting
*/
}
static void xelpg_setup_private_ppat(struct intel_gt *gt)
{
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0),
MTL_PPAT_L4_0_WB);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1),
MTL_PPAT_L4_1_WT);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2),
MTL_PPAT_L4_3_UC);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3),
MTL_PPAT_L4_0_WB | MTL_2_COH_1W);
intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4),
MTL_PPAT_L4_0_WB | MTL_3_COH_2W);
/*
* Remaining PAT entries are left at the hardware-default
* fully-cached setting
*/
}
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}
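/*
 * On Xe_HP the PAT registers are multicast, so program them via MCR
 * multicast writes with forcewake held and the MCR lock taken.
 */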
static void xehp_setup_private_ppat(struct intel_gt *gt)
{
enum forcewake_domains fw;
unsigned long flags;
fw = intel_uncore_forcewake_for_reg(gt->uncore, _MMIO(XEHP_PAT_INDEX(0).reg),
FW_REG_WRITE);
intel_uncore_forcewake_get(gt->uncore, fw);
intel_gt_mcr_lock(gt, &flags);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
intel_gt_mcr_unlock(gt, flags);
intel_uncore_forcewake_put(gt->uncore, fw);
}
static void icl_setup_private_ppat(struct intel_uncore *uncore)
{
intel_uncore_write(uncore,
GEN10_PAT_INDEX(0),
GEN8_PPAT_WB | GEN8_PPAT_LLC);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(1),
GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(2),
GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(3),
GEN8_PPAT_UC);
intel_uncore_write(uncore,
GEN10_PAT_INDEX(4),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
intel_uncore_write(uncore,
GEN10_PAT_INDEX(5),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
intel_uncore_write(uncore,
GEN10_PAT_INDEX(6),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
intel_uncore_write(uncore,
GEN10_PAT_INDEX(7),
GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/*
* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases.
*/
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
u64 pat;
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
/* for scanout with eLLC */
if (GRAPHICS_VER(i915) >= 9)
pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
else
pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
u64 pat;
/*
* Map WB on BDW to snooped on CHV.
*
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
* The hardware will never snoop for certain types of accesses:
* - CPU GTT (GMADR->GGTT->no snoop->memory)
* - PPGTT page tables
* - some other special cycles
*
* As with BDW, we also need to consider the following for GT accesses:
* "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
GEN8_PPAT(2, 0) |
GEN8_PPAT(3, 0) |
GEN8_PPAT(4, CHV_PPAT_SNOOP) |
GEN8_PPAT(5, CHV_PPAT_SNOOP) |
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
void setup_private_pat(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
struct drm_i915_private *i915 = gt->i915;
GEM_BUG_ON(GRAPHICS_VER(i915) < 8);
if (gt->type == GT_MEDIA) {
xelpmp_setup_private_ppat(gt->uncore);
return;
}
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
xelpg_setup_private_ppat(gt);
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
xehp_setup_private_ppat(gt);
else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore);
else if (GRAPHICS_VER(i915) >= 11)
icl_setup_private_ppat(uncore);
else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
chv_setup_private_ppat(uncore);
else
bdw_setup_private_ppat(uncore);
}
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
return ERR_CAST(obj);
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
return vma;
}
struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
struct i915_vma *vma;
int err;
vma = __vm_create_scratch_for_read(vm, size);
if (IS_ERR(vma))
return vma;
err = i915_vma_pin(vma, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err) {
i915_vma_put(vma);
return ERR_PTR(err);
}
return vma;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_gtt.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_tlb.h"
/*
* HW architecture suggest typical invalidation time at 40us,
* with pessimistic cases up to 100us and a recommendation to
* cap at 1ms. We go a bit higher just in case.
*/
#define TLB_INVAL_TIMEOUT_US 100
#define TLB_INVAL_TIMEOUT_MS 4
/*
* On Xe_HP the TLB invalidation registers are located at the same MMIO offsets
* but are now considered MCR registers. Since they exist within a GAM range,
* the primary instance of the register rolls up the status from each unit.
*/
static int wait_for_invalidate(struct intel_engine_cs *engine)
{
if (engine->tlb_inv.mcr)
return intel_gt_mcr_wait_for_reg(engine->gt,
engine->tlb_inv.reg.mcr_reg,
engine->tlb_inv.done,
0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS);
else
return __intel_wait_for_register_fw(engine->gt->uncore,
engine->tlb_inv.reg.reg,
engine->tlb_inv.done,
0,
TLB_INVAL_TIMEOUT_US,
TLB_INVAL_TIMEOUT_MS,
NULL);
}
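/*
 * Issue a full TLB invalidation by writing each awake engine's invalidation
 * register under forcewake, then wait for every poked engine to report
 * completion. Holding uncore->lock serialises the writes against a
 * concurrent GT reset.
 */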
static void mmio_invalidate_full(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
intel_engine_mask_t awake, tmp;
enum intel_engine_id id;
unsigned long flags;
if (GRAPHICS_VER(i915) < 8)
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
intel_gt_mcr_lock(gt, &flags);
spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
awake = 0;
for_each_engine(engine, gt, id) {
if (!intel_engine_pm_is_awake(engine))
continue;
if (engine->tlb_inv.mcr)
intel_gt_mcr_multicast_write_fw(gt,
engine->tlb_inv.reg.mcr_reg,
engine->tlb_inv.request);
else
intel_uncore_write_fw(uncore,
engine->tlb_inv.reg.reg,
engine->tlb_inv.request);
awake |= engine->mask;
}
GT_TRACE(gt, "invalidated engines %08x\n", awake);
/* Wa_2207587034:tgl,dg1,rkl,adl-s,adl-p */
if (awake &&
(IS_TIGERLAKE(i915) ||
IS_DG1(i915) ||
IS_ROCKETLAKE(i915) ||
IS_ALDERLAKE_S(i915) ||
IS_ALDERLAKE_P(i915)))
intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
if (wait_for_invalidate(engine))
gt_err_ratelimited(gt,
"%s TLB invalidation did not complete in %ums!\n",
engine->name, TLB_INVAL_TIMEOUT_MS);
}
/*
* Use delayed put since a) we mostly expect a flurry of TLB
* invalidations so it is good to avoid paying the forcewake cost and
* b) it works around a bug in Icelake which cannot cope with too rapid
* transitions.
*/
intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
}
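/*
 * Report whether a full TLB invalidation barrier has already completed for
 * the given seqno, allowing callers to skip a redundant invalidation.
 */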
static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)
{
u32 cur = intel_gt_tlb_seqno(gt);
/* Only skip if a *full* TLB invalidate barrier has passed */
return (s32)(cur - ALIGN(seqno, 2)) > 0;
}
void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
{
intel_wakeref_t wakeref;
if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
return;
if (intel_gt_is_wedged(gt))
return;
if (tlb_seqno_passed(gt, seqno))
return;
with_intel_gt_pm_if_awake(gt, wakeref) {
mutex_lock(&gt->tlb.invalidate_lock);
if (tlb_seqno_passed(gt, seqno))
goto unlock;
mmio_invalidate_full(gt);
write_seqcount_invalidate(&gt->tlb.seqno);
unlock:
mutex_unlock(&gt->tlb.invalidate_lock);
}
}
void intel_gt_init_tlb(struct intel_gt *gt)
{
mutex_init(&gt->tlb.invalidate_lock);
seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);
}
void intel_gt_fini_tlb(struct intel_gt *gt)
{
mutex_destroy(&gt->tlb.invalidate_lock);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_tlb.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_tlb.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corporation
*/
#include <linux/crc32.h>
#include "gem/i915_gem_stolen.h"
#include "i915_memcpy.h"
#include "i915_selftest.h"
#include "intel_gpu_commands.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
static int
__igt_reset_stolen(struct intel_gt *gt,
intel_engine_mask_t mask,
const char *msg)
{
struct i915_ggtt *ggtt = gt->ggtt;
const struct resource *dsm = &gt->i915->dsm.stolen;
resource_size_t num_pages, page;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
enum intel_engine_id id;
struct igt_spinner spin;
long max, count;
void *tmp;
u32 *crc;
int err;
if (!drm_mm_node_allocated(&ggtt->error_capture))
return 0;
num_pages = resource_size(dsm) >> PAGE_SHIFT;
if (!num_pages)
return 0;
crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
if (!crc)
return -ENOMEM;
tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!tmp) {
err = -ENOMEM;
goto err_crc;
}
igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
err = igt_spinner_init(&spin, gt);
if (err)
goto err_lock;
for_each_engine(engine, gt, id) {
struct intel_context *ce;
struct i915_request *rq;
if (!(mask & engine->mask))
continue;
if (!intel_engine_can_store_dword(engine))
continue;
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto err_spin;
}
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_spin;
}
i915_request_add(rq);
}
for (page = 0; page < num_pages; page++) {
dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
void __iomem *s;
void *in;
ggtt->vm.insert_page(&ggtt->vm, dma,
ggtt->error_capture.start,
i915_gem_get_pat_index(gt->i915,
I915_CACHE_NONE),
0);
mb();
s = io_mapping_map_wc(&ggtt->iomap,
ggtt->error_capture.start,
PAGE_SIZE);
if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
page << PAGE_SHIFT,
((page + 1) << PAGE_SHIFT) - 1))
memset_io(s, STACK_MAGIC, PAGE_SIZE);
in = (void __force *)s;
if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
crc[page] = crc32_le(0, in, PAGE_SIZE);
io_mapping_unmap(s);
}
mb();
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
if (mask == ALL_ENGINES) {
intel_gt_reset(gt, mask, NULL);
} else {
for_each_engine(engine, gt, id) {
if (mask & engine->mask)
intel_engine_reset(engine, NULL);
}
}
max = -1;
count = 0;
for (page = 0; page < num_pages; page++) {
dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
void __iomem *s;
void *in;
u32 x;
ggtt->vm.insert_page(&ggtt->vm, dma,
ggtt->error_capture.start,
i915_gem_get_pat_index(gt->i915,
I915_CACHE_NONE),
0);
mb();
s = io_mapping_map_wc(&ggtt->iomap,
ggtt->error_capture.start,
PAGE_SIZE);
in = (void __force *)s;
if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
in = tmp;
x = crc32_le(0, in, PAGE_SIZE);
if (x != crc[page] &&
!__drm_mm_interval_first(&gt->i915->mm.stolen,
page << PAGE_SHIFT,
((page + 1) << PAGE_SHIFT) - 1)) {
pr_debug("unused stolen page %pa modified by GPU reset\n",
&page);
if (count++ == 0)
igt_hexdump(in, PAGE_SIZE);
max = page;
}
io_mapping_unmap(s);
}
mb();
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
if (count > 0) {
pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
msg, count, max);
}
if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
msg, I915_GEM_STOLEN_BIAS);
err = -EINVAL;
}
err_spin:
igt_spinner_fini(&spin);
err_lock:
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
kfree(tmp);
err_crc:
kfree(crc);
return err;
}
static int igt_reset_device_stolen(void *arg)
{
return __igt_reset_stolen(arg, ALL_ENGINES, "device");
}
static int igt_reset_engines_stolen(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
if (!intel_has_reset_engine(gt))
return 0;
for_each_engine(engine, gt, id) {
err = __igt_reset_stolen(gt, engine->mask, engine->name);
if (err)
return err;
}
return 0;
}
static int igt_global_reset(void *arg)
{
struct intel_gt *gt = arg;
unsigned int reset_count;
intel_wakeref_t wakeref;
int err = 0;
/* Check that we can issue a global GPU reset */
igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reset_count = i915_reset_count(&gt->i915->gpu_error);
intel_gt_reset(gt, ALL_ENGINES, NULL);
if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
}
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
}
static int igt_wedged_reset(void *arg)
{
struct intel_gt *gt = arg;
intel_wakeref_t wakeref;
/* Check that we can recover a wedged device with a GPU reset */
igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
intel_gt_set_wedged(gt);
GEM_BUG_ON(!intel_gt_is_wedged(gt));
intel_gt_reset(gt, ALL_ENGINES, NULL);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(gt);
return intel_gt_is_wedged(gt) ? -EIO : 0;
}
static int igt_atomic_reset(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the resets are usable from atomic context */
intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
intel_engine_mask_t awake;
GEM_TRACE("__intel_gt_reset under %s\n", p->name);
awake = reset_prepare(gt);
p->critical_section_begin();
err = __intel_gt_reset(gt, ALL_ENGINES);
p->critical_section_end();
reset_finish(gt, awake);
if (err) {
pr_err("__intel_gt_reset failed under %s\n", p->name);
break;
}
}
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
unlock:
igt_global_reset_unlock(gt);
intel_gt_pm_put(gt);
return err;
}
static int igt_atomic_engine_reset(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/* Check that the resets are usable from atomic context */
if (!intel_has_reset_engine(gt))
return 0;
if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
if (!igt_force_reset(gt))
goto out_unlock;
for_each_engine(engine, gt, id) {
struct tasklet_struct *t = &engine->sched_engine->tasklet;
if (t->func)
tasklet_disable(t);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
GEM_TRACE("intel_engine_reset(%s) under %s\n",
engine->name, p->name);
if (strcmp(p->name, "softirq"))
local_bh_disable();
p->critical_section_begin();
err = __intel_engine_reset_bh(engine, NULL);
p->critical_section_end();
if (strcmp(p->name, "softirq"))
local_bh_enable();
if (err) {
pr_err("intel_engine_reset(%s) failed under %s\n",
engine->name, p->name);
break;
}
}
intel_engine_pm_put(engine);
if (t->func) {
tasklet_enable(t);
tasklet_hi_schedule(t);
}
if (err)
break;
}
/* As we poke around the guts, do a full reset before continuing. */
igt_force_reset(gt);
out_unlock:
igt_global_reset_unlock(gt);
intel_gt_pm_put(gt);
return err;
}
int intel_reset_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_reset_device_stolen),
SUBTEST(igt_reset_engines_stolen),
SUBTEST(igt_wedged_reset),
SUBTEST(igt_atomic_reset),
SUBTEST(igt_atomic_engine_reset),
};
struct intel_gt *gt = to_gt(i915);
if (!intel_has_gpu_reset(gt))
return 0;
if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
return intel_gt_live_subtests(tests, gt);
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_reset.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_ring.h"
#include "intel_sseu.h"
static int gen8_emit_rpcs_config(struct i915_request *rq,
const struct intel_context *ce,
const struct intel_sseu sseu)
{
u64 offset;
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
offset = i915_ggtt_offset(ce->state) +
LRC_STATE_OFFSET + CTX_R_PWR_CLK_STATE * 4;
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
*cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu);
intel_ring_advance(rq, cs);
return 0;
}
static int
gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
{
struct i915_request *rq;
int ret;
lockdep_assert_held(&ce->pin_mutex);
/*
* If the context is not idle, we have to submit an ordered request to
* modify its context image via the kernel context (writing to our own
* image, or into the registers directly, does not stick). Pristine
* and idle contexts will be configured on pinning.
*/
if (!intel_context_pin_if_active(ce))
return 0;
rq = intel_engine_create_kernel_request(ce->engine);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto out_unpin;
}
/* Serialise with the remote context */
ret = intel_context_prepare_remote_request(ce, rq);
if (ret == 0)
ret = gen8_emit_rpcs_config(rq, ce, sseu);
i915_request_add(rq);
out_unpin:
intel_context_unpin(ce);
return ret;
}
int
intel_context_reconfigure_sseu(struct intel_context *ce,
const struct intel_sseu sseu)
{
int ret;
GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
ret = intel_context_lock_pinned(ce);
if (ret)
return ret;
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
goto unlock;
ret = gen8_modify_rpcs(ce, sseu);
if (!ret)
ce->sseu = sseu;
unlock:
intel_context_unlock_pinned(ce);
return ret;
}
| linux-master | drivers/gpu/drm/i915/gt/intel_context_sseu.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*
* Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
*/
static const u32 hsw_clear_kernel[] = {
0x00000001, 0x26020128, 0x00000024, 0x00000000,
0x00000040, 0x20280c21, 0x00000028, 0x00000001,
0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
0x00010220, 0x34001c00, 0x00001400, 0x00000160,
0x00600001, 0x20600061, 0x00000000, 0x00000000,
0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
0x00000041, 0x207424a5, 0x00000064, 0x00000034,
0x00000040, 0x206014a5, 0x00000060, 0x00000074,
0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
0x00000040, 0x206814a5, 0x00000068, 0x00000070,
0x00600001, 0x20a00061, 0x00000000, 0x00000000,
0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
0x00600001, 0x20800021, 0x008d0000, 0x00000000,
0x00000001, 0x20800021, 0x0000006c, 0x00000000,
0x00000001, 0x20840021, 0x00000068, 0x00000000,
0x00000001, 0x20880061, 0x00000000, 0x00000003,
0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
0x00000001, 0x26020128, 0x00000024, 0x00000000,
0x00000001, 0x220010e4, 0x00000000, 0x00000000,
0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
0x00600001, 0x20400021, 0x008d0000, 0x00000000,
0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
0x00200001, 0x20400121, 0x00450020, 0x00000000,
0x00000001, 0x20480061, 0x00000000, 0x000f000f,
0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
0x00800001, 0x20600061, 0x00000000, 0x00000000,
0x00800001, 0x20800061, 0x00000000, 0x00000000,
0x00800001, 0x20a00061, 0x00000000, 0x00000000,
0x00800001, 0x20c00061, 0x00000000, 0x00000000,
0x00800001, 0x20e00061, 0x00000000, 0x00000000,
0x00800001, 0x21000061, 0x00000000, 0x00000000,
0x00800001, 0x21200061, 0x00000000, 0x00000000,
0x00800001, 0x21400061, 0x00000000, 0x00000000,
0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
0x00000040, 0x20402d21, 0x00000020, 0x00100010,
0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
0x00800001, 0xa0000109, 0x00000602, 0x00000000,
0x00000040, 0x22001c84, 0x00000200, 0x00000020,
0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
};
| linux-master | drivers/gpu/drm/i915/gt/hsw_clear_kernel.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"
static struct kmem_cache *slab_ce;
static struct intel_context *intel_context_alloc(void)
{
return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
}
static void rcu_context_free(struct rcu_head *rcu)
{
struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
trace_intel_context_free(ce);
kmem_cache_free(slab_ce, ce);
}
void intel_context_free(struct intel_context *ce)
{
call_rcu(&ce->rcu, rcu_context_free);
}
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
struct intel_context *ce;
ce = intel_context_alloc();
if (!ce)
return ERR_PTR(-ENOMEM);
intel_context_init(ce, engine);
trace_intel_context_create(ce);
return ce;
}
int intel_context_alloc_state(struct intel_context *ce)
{
int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
return -EINTR;
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
if (intel_context_is_banned(ce)) {
err = -EIO;
goto unlock;
}
err = ce->ops->alloc(ce);
if (unlikely(err))
goto unlock;
set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
}
unlock:
mutex_unlock(&ce->pin_mutex);
return err;
}
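/*
 * Acquire the context's active tracker. Contexts that are not kernel barrier
 * contexts, GuC submitted or parallel also preallocate here the barrier
 * nodes that intel_context_active_release() will later attach.
 */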
static int intel_context_active_acquire(struct intel_context *ce)
{
int err;
__i915_active_acquire(&ce->active);
if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
intel_context_is_parallel(ce))
return 0;
/* Preallocate tracking nodes */
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
if (err)
i915_active_release(&ce->active);
return err;
}
static void intel_context_active_release(struct intel_context *ce)
{
/* Nodes preallocated in intel_context_active() */
i915_active_acquire_barrier(&ce->active);
i915_active_release(&ce->active);
}
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
int err;
err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
if (err)
return err;
err = i915_active_acquire(&vma->active);
if (err)
goto err_unpin;
/*
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
*/
i915_vma_make_unshrinkable(vma);
vma->obj->mm.dirty = true;
return 0;
err_unpin:
i915_vma_unpin(vma);
return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
i915_vma_make_shrinkable(vma);
i915_active_release(&vma->active);
__i915_vma_unpin(vma);
}
static int __ring_active(struct intel_ring *ring,
struct i915_gem_ww_ctx *ww)
{
int err;
err = intel_ring_pin(ring, ww);
if (err)
return err;
err = i915_active_acquire(&ring->vma->active);
if (err)
goto err_pin;
return 0;
err_pin:
intel_ring_unpin(ring);
return err;
}
static void __ring_retire(struct intel_ring *ring)
{
i915_active_release(&ring->vma->active);
intel_ring_unpin(ring);
}
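/*
 * Pin the backing state that must be resident before the context can be
 * used: the ring, the timeline and (if present) the context state object,
 * unwinding in reverse order on failure.
 */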
static int intel_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww)
{
int err;
CE_TRACE(ce, "active\n");
err = __ring_active(ce->ring, ww);
if (err)
return err;
err = intel_timeline_pin(ce->timeline, ww);
if (err)
goto err_ring;
if (!ce->state)
return 0;
err = __context_pin_state(ce->state, ww);
if (err)
goto err_timeline;
return 0;
err_timeline:
intel_timeline_unpin(ce->timeline);
err_ring:
__ring_retire(ce->ring);
return err;
}
static void intel_context_post_unpin(struct intel_context *ce)
{
if (ce->state)
__context_unpin_state(ce->state);
intel_timeline_unpin(ce->timeline);
__ring_retire(ce->ring);
}
int __intel_context_do_pin_ww(struct intel_context *ce,
struct i915_gem_ww_ctx *ww)
{
bool handoff = false;
void *vaddr;
int err = 0;
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = intel_context_alloc_state(ce);
if (err)
return err;
}
/*
* We always pin the context/ring/timeline here, to ensure a pin
* refcount for __intel_context_active(), which prevents a lock
* inversion of ce->pin_mutex vs dma_resv_lock().
*/
err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
if (!err)
err = i915_gem_object_lock(ce->ring->vma->obj, ww);
if (!err && ce->state)
err = i915_gem_object_lock(ce->state->obj, ww);
if (!err)
err = intel_context_pre_pin(ce, ww);
if (err)
return err;
err = ce->ops->pre_pin(ce, ww, &vaddr);
if (err)
goto err_ctx_unpin;
err = i915_active_acquire(&ce->active);
if (err)
goto err_post_unpin;
err = mutex_lock_interruptible(&ce->pin_mutex);
if (err)
goto err_release;
intel_engine_pm_might_get(ce->engine);
if (unlikely(intel_context_is_closed(ce))) {
err = -ENOENT;
goto err_unlock;
}
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
goto err_unlock;
err = ce->ops->pin(ce, vaddr);
if (err) {
intel_context_active_release(ce);
goto err_unlock;
}
CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
i915_ggtt_offset(ce->ring->vma),
ce->ring->head, ce->ring->tail);
handoff = true;
smp_mb__before_atomic(); /* flush pin before it is visible */
atomic_inc(&ce->pin_count);
}
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
trace_intel_context_do_pin(ce);
err_unlock:
mutex_unlock(&ce->pin_mutex);
err_release:
i915_active_release(&ce->active);
err_post_unpin:
if (!handoff)
ce->ops->post_unpin(ce);
err_ctx_unpin:
intel_context_post_unpin(ce);
/*
* Unlock the hwsp_ggtt object since it's shared.
* In principle we can unlock all the global state locked above
* since it's pinned and doesn't need fencing, and will
* thus remain resident until it is explicitly unpinned.
*/
i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);
return err;
}
int __intel_context_do_pin(struct intel_context *ce)
{
struct i915_gem_ww_ctx ww;
int err;
i915_gem_ww_ctx_init(&ww, true);
retry:
err = __intel_context_do_pin_ww(ce, &ww);
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
return err;
}
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
if (!atomic_sub_and_test(sub, &ce->pin_count))
return;
CE_TRACE(ce, "unpin\n");
ce->ops->unpin(ce);
ce->ops->post_unpin(ce);
/*
* Once released, we may asynchronously drop the active reference.
* As that may be the only reference keeping the context alive,
* take an extra now so that it is not freed before we finish
* dereferencing it.
*/
intel_context_get(ce);
intel_context_active_release(ce);
trace_intel_context_do_unpin(ce);
intel_context_put(ce);
}
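/*
 * Last active reference dropped: mark the saved context image as valid,
 * release the pinned backing storage and drop the reference taken in
 * __intel_context_active().
 */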
static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
intel_context_get_total_runtime_ns(ce),
intel_context_get_avg_runtime_ns(ce));
set_bit(CONTEXT_VALID_BIT, &ce->flags);
intel_context_post_unpin(ce);
intel_context_put(ce);
}
static int __intel_context_active(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
intel_context_get(ce);
/* everything should already be activated by intel_context_pre_pin() */
GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
__intel_ring_pin(ce->ring);
__intel_timeline_pin(ce->timeline);
if (ce->state) {
GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
__i915_vma_pin(ce->state);
i915_vma_make_unshrinkable(ce->state);
}
return 0;
}
static int
sw_fence_dummy_notify(struct i915_sw_fence *sf,
enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
}
void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
GEM_BUG_ON(!engine->cops);
GEM_BUG_ON(!engine->gt->vm);
kref_init(&ce->ref);
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
ce->ring = NULL;
ce->ring_size = SZ_4K;
ewma_runtime_init(&ce->stats.runtime.avg);
ce->vm = i915_vm_get(engine->gt->vm);
/* NB ce->signal_link/lock is used under RCU */
spin_lock_init(&ce->signal_lock);
INIT_LIST_HEAD(&ce->signals);
mutex_init(&ce->pin_mutex);
spin_lock_init(&ce->guc_state.lock);
INIT_LIST_HEAD(&ce->guc_state.fences);
INIT_LIST_HEAD(&ce->guc_state.requests);
ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
INIT_LIST_HEAD(&ce->guc_id.link);
INIT_LIST_HEAD(&ce->destroyed_link);
INIT_LIST_HEAD(&ce->parallel.child_list);
/*
* Initialize the fence as already signaled, as it is expected to be complete
* unless there is a pending schedule disable outstanding.
*/
i915_sw_fence_init(&ce->guc_state.blocked,
sw_fence_dummy_notify);
i915_sw_fence_commit(&ce->guc_state.blocked);
i915_active_init(&ce->active,
__intel_context_active, __intel_context_retire, 0);
}
void intel_context_fini(struct intel_context *ce)
{
struct intel_context *child, *next;
if (ce->timeline)
intel_timeline_put(ce->timeline);
i915_vm_put(ce->vm);
/* Need to put the creation ref for the children */
if (intel_context_is_parent(ce))
for_each_child_safe(ce, child, next)
intel_context_put(child);
mutex_destroy(&ce->pin_mutex);
i915_active_fini(&ce->active);
i915_sw_fence_fini(&ce->guc_state.blocked);
}
void i915_context_module_exit(void)
{
kmem_cache_destroy(slab_ce);
}
int __init i915_context_module_init(void)
{
slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
if (!slab_ce)
return -ENOMEM;
return 0;
}
void intel_context_enter_engine(struct intel_context *ce)
{
intel_engine_pm_get(ce->engine);
intel_timeline_enter(ce->timeline);
}
void intel_context_exit_engine(struct intel_context *ce)
{
intel_timeline_exit(ce->timeline);
intel_engine_pm_put(ce->engine);
}
int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *rq)
{
struct intel_timeline *tl = ce->timeline;
int err;
/* Only suitable for use in remotely modifying this context */
GEM_BUG_ON(rq->context == ce);
if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
/* Queue this switch after current activity by this context. */
err = i915_active_fence_set(&tl->last_request, rq);
if (err)
return err;
}
/*
* Guarantee the context image and the timeline remain pinned until the
* modifying request is retired by setting the ce activity tracker.
*
* But we only need to take one pin on account of it; in other words,
* transfer the pinned ce object to the tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
return i915_active_add_request(&ce->active, rq);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
struct i915_gem_ww_ctx ww;
struct i915_request *rq;
int err;
i915_gem_ww_ctx_init(&ww, true);
retry:
err = intel_context_pin_ww(ce, &ww);
if (!err) {
rq = i915_request_create(ce);
intel_context_unpin(ce);
} else if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
rq = ERR_PTR(err);
} else {
rq = ERR_PTR(err);
}
i915_gem_ww_ctx_fini(&ww);
if (IS_ERR(rq))
return rq;
/*
* timeline->mutex should be the inner lock, but is used as outer lock.
* Hack around this to shut up lockdep in selftests.
*/
lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
return rq;
}
struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
struct intel_context *parent = intel_context_to_parent(ce);
struct i915_request *rq, *active = NULL;
unsigned long flags;
GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
/*
* We search the parent list to find an active request on the submitted
* context. The parent list contains the requests for all the contexts
* in the relationship, so we have to compare each request's
* context.
*/
spin_lock_irqsave(&parent->guc_state.lock, flags);
list_for_each_entry_reverse(rq, &parent->guc_state.requests,
sched.link) {
if (rq->context != ce)
continue;
if (i915_request_completed(rq))
break;
active = rq;
}
if (active)
active = i915_request_get_rcu(active);
spin_unlock_irqrestore(&parent->guc_state.lock, flags);
return active;
}
void intel_context_bind_parent_child(struct intel_context *parent,
struct intel_context *child)
{
/*
* It is the caller's responsibility to validate that this function is used
* correctly, but we use GEM_BUG_ON here to ensure that they do.
*/
GEM_BUG_ON(intel_context_is_pinned(parent));
GEM_BUG_ON(intel_context_is_child(parent));
GEM_BUG_ON(intel_context_is_pinned(child));
GEM_BUG_ON(intel_context_is_child(child));
GEM_BUG_ON(intel_context_is_parent(child));
parent->parallel.child_index = parent->parallel.number_children++;
list_add_tail(&child->parallel.child_link,
&parent->parallel.child_list);
child->parallel.parent = parent;
}
u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
u64 total, active;
if (ce->ops->update_stats)
ce->ops->update_stats(ce);
total = ce->stats.runtime.total;
if (ce->ops->flags & COPS_RUNTIME_CYCLES)
total *= ce->engine->gt->clock_period_ns;
active = READ_ONCE(ce->stats.active);
if (active)
active = intel_context_clock() - active;
return total + active;
}
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);
if (ce->ops->flags & COPS_RUNTIME_CYCLES)
avg *= ce->engine->gt->clock_period_ns;
return avg;
}
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
{
bool ret = intel_context_set_banned(ce);
trace_intel_context_ban(ce);
if (ce->ops->revoke)
ce->ops->revoke(ce, rq,
INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS);
return ret;
}
bool intel_context_revoke(struct intel_context *ce)
{
bool ret = intel_context_set_exiting(ce);
if (ce->ops->revoke)
ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_context.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/iosys-map.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include "i915_drv.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_lmem.h"
#include "shmem_utils.h"
struct file *shmem_create_from_data(const char *name, void *data, size_t len)
{
struct file *file;
int err;
file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
if (IS_ERR(file))
return file;
err = shmem_write(file, 0, data, len);
if (err) {
fput(file);
return ERR_PTR(err);
}
return file;
}
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
enum i915_map_type map_type;
struct file *file;
void *ptr;
if (i915_gem_object_is_shmem(obj)) {
file = obj->base.filp;
atomic_long_inc(&file->f_count);
return file;
}
map_type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB;
ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
file = shmem_create_from_data("", ptr, obj->base.size);
i915_gem_object_unpin_map(obj);
return file;
}
void *shmem_pin_map(struct file *file)
{
struct page **pages;
size_t n_pages, i;
void *vaddr;
n_pages = file->f_mapping->host->i_size >> PAGE_SHIFT;
pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
for (i = 0; i < n_pages; i++) {
pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
GFP_KERNEL);
if (IS_ERR(pages[i]))
goto err_page;
}
vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);
if (!vaddr)
goto err_page;
mapping_set_unevictable(file->f_mapping);
return vaddr;
err_page:
while (i--)
put_page(pages[i]);
kvfree(pages);
return NULL;
}
void shmem_unpin_map(struct file *file, void *ptr)
{
mapping_clear_unevictable(file->f_mapping);
vfree(ptr);
}
static int __shmem_rw(struct file *file, loff_t off,
void *ptr, size_t len,
bool write)
{
unsigned long pfn;
for (pfn = off >> PAGE_SHIFT; len; pfn++) {
unsigned int this =
min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
struct page *page;
void *vaddr;
page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
GFP_KERNEL);
if (IS_ERR(page))
return PTR_ERR(page);
vaddr = kmap(page);
if (write) {
memcpy(vaddr + offset_in_page(off), ptr, this);
set_page_dirty(page);
} else {
memcpy(ptr, vaddr + offset_in_page(off), this);
}
mark_page_accessed(page);
kunmap(page);
put_page(page);
len -= this;
ptr += this;
off = 0;
}
return 0;
}
int shmem_read_to_iosys_map(struct file *file, loff_t off,
struct iosys_map *map, size_t map_off, size_t len)
{
unsigned long pfn;
for (pfn = off >> PAGE_SHIFT; len; pfn++) {
unsigned int this =
min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
struct page *page;
void *vaddr;
page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
GFP_KERNEL);
if (IS_ERR(page))
return PTR_ERR(page);
vaddr = kmap(page);
iosys_map_memcpy_to(map, map_off, vaddr + offset_in_page(off),
this);
mark_page_accessed(page);
kunmap(page);
put_page(page);
len -= this;
map_off += this;
off = 0;
}
return 0;
}
int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
{
return __shmem_rw(file, off, dst, len, false);
}
int shmem_write(struct file *file, loff_t off, void *src, size_t len)
{
return __shmem_rw(file, off, src, len, true);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "st_shmem_utils.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/shmem_utils.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2008-2015 Intel Corporation
*/
#include <linux/highmem.h>
#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
/**
* DOC: fence register handling
*
* Important to avoid confusion: "fences" in the i915 driver are not execution
* fences used to track command completion but hardware detiler objects which
* wrap a given range of the global GTT. Each platform has only a fairly limited
* set of these objects.
*
* Fences are used to detile GTT memory mappings. They're also connected to the
* hardware frontbuffer render tracking and hence interact with frontbuffer
* compression. Furthermore on older platforms fences are required for tiled
* objects used by the display engine. They can also be used by the render
* engine - they're required for blitter commands and are optional for render
* commands. But on gen4+ both display (with the exception of fbc) and rendering
* have their own tiling state bits and don't need fences.
*
* Also note that fences only support X and Y tiling and hence can't be used for
* the fancier new tiling formats like W, Ys and Yf.
*
* Finally note that because fences are such a restricted resource they're
* dynamically associated with objects. Furthermore fence state is committed to
* the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
* explicitly call i915_gem_object_get_fence() to synchronize fencing status
* for cpu access. Also note that some code wants an unfenced view, for those
* cases the fence can be removed forcefully with i915_gem_object_put_fence().
*
* Internally these functions will synchronize with userspace access by removing
* CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
*/
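/*
 * A minimal usage sketch of the pattern described above (illustrative only,
 * never called by the driver; it assumes the i915_vma.h and intel_runtime_pm.h
 * declarations are visible via i915_drv.h): fences are revoked on runtime
 * suspend, so the caller must hold a runtime-pm wakeref for as long as the
 * fenced GTT mapping is in use.
 */
static int __maybe_unused example_fenced_access(struct i915_vma *vma)
{
	struct intel_runtime_pm *rpm = vma->vm->gt->uncore->rpm;
	intel_wakeref_t wakeref;
	int err;

	wakeref = intel_runtime_pm_get(rpm);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto out;

	/* ... detiled CPU access through the GTT mapping goes here ... */

	i915_vma_unpin_fence(vma);
out:
	intel_runtime_pm_put(rpm, wakeref);
	return err;
}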
#define pipelined 0
static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence)
{
return fence->ggtt->vm.i915;
}
static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence)
{
return fence->ggtt->vm.gt->uncore;
}
static void i965_write_fence_reg(struct i915_fence_reg *fence)
{
i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift;
u64 val;
if (GRAPHICS_VER(fence_to_i915(fence)) >= 6) {
fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
} else {
fence_reg_lo = FENCE_REG_965_LO(fence->id);
fence_reg_hi = FENCE_REG_965_HI(fence->id);
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
val = 0;
if (fence->tiling) {
unsigned int stride = fence->stride;
GEM_BUG_ON(!IS_ALIGNED(stride, 128));
val = fence->start + fence->size - I965_FENCE_PAGE;
val <<= 32;
val |= fence->start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
if (fence->tiling == I915_TILING_Y)
val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
}
if (!pipelined) {
struct intel_uncore *uncore = fence_to_uncore(fence);
/*
* To w/a incoherency with non-atomic 64-bit register updates,
* we split the 64-bit update into two 32-bit writes. In order
* for a partial fence not to be evaluated between writes, we
* precede the update with a write to turn off the fence register,
* and only enable the fence as the last step.
*
* For extra levels of paranoia, we make sure each step lands
* before applying the next step.
*/
intel_uncore_write_fw(uncore, fence_reg_lo, 0);
intel_uncore_posting_read_fw(uncore, fence_reg_lo);
intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
intel_uncore_posting_read_fw(uncore, fence_reg_lo);
}
}
static void i915_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
if (fence->tiling) {
unsigned int stride = fence->stride;
unsigned int tiling = fence->tiling;
bool is_y_tiled = tiling == I915_TILING_Y;
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence)))
stride /= 128;
else
stride /= 512;
GEM_BUG_ON(!is_power_of_2(stride));
val = fence->start;
if (is_y_tiled)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
val |= I915_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
if (!pipelined) {
struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
intel_uncore_posting_read_fw(uncore, reg);
}
}
static void i830_write_fence_reg(struct i915_fence_reg *fence)
{
u32 val;
val = 0;
if (fence->tiling) {
unsigned int stride = fence->stride;
val = fence->start;
if (fence->tiling == I915_TILING_Y)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
val |= I830_FENCE_SIZE_BITS(fence->size);
val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
if (!pipelined) {
struct intel_uncore *uncore = fence_to_uncore(fence);
i915_reg_t reg = FENCE_REG(fence->id);
intel_uncore_write_fw(uncore, reg, val);
intel_uncore_posting_read_fw(uncore, reg);
}
}
static void fence_write(struct i915_fence_reg *fence)
{
struct drm_i915_private *i915 = fence_to_i915(fence);
/*
* Previous access through the fence register is marshalled by
* the mb() inside the fault handlers (i915_gem_release_mmaps)
* and explicitly managed for internal users.
*/
if (GRAPHICS_VER(i915) == 2)
i830_write_fence_reg(fence);
else if (GRAPHICS_VER(i915) == 3)
i915_write_fence_reg(fence);
else
i965_write_fence_reg(fence);
/*
* Access through the fenced region afterwards is
* ordered by the posting reads whilst writing the registers.
*/
}
static bool gpu_uses_fence_registers(struct i915_fence_reg *fence)
{
return GRAPHICS_VER(fence_to_i915(fence)) < 4;
}
static int fence_update(struct i915_fence_reg *fence,
struct i915_vma *vma)
{
struct i915_ggtt *ggtt = fence->ggtt;
struct intel_uncore *uncore = fence_to_uncore(fence);
intel_wakeref_t wakeref;
struct i915_vma *old;
int ret;
fence->tiling = 0;
if (vma) {
GEM_BUG_ON(!i915_gem_object_get_stride(vma->obj) ||
!i915_gem_object_get_tiling(vma->obj));
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
if (gpu_uses_fence_registers(fence)) {
/* implicit 'unfenced' GPU blits */
ret = i915_vma_sync(vma);
if (ret)
return ret;
}
GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
fence->start = i915_ggtt_offset(vma);
fence->size = vma->fence_size;
fence->stride = i915_gem_object_get_stride(vma->obj);
fence->tiling = i915_gem_object_get_tiling(vma->obj);
}
WRITE_ONCE(fence->dirty, false);
old = xchg(&fence->vma, NULL);
if (old) {
/* XXX Ideally we would move the waiting to outside the mutex */
ret = i915_active_wait(&fence->active);
if (ret) {
fence->vma = old;
return ret;
}
i915_vma_flush_writes(old);
/*
* Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
if (old != vma) {
GEM_BUG_ON(old->fence != fence);
i915_vma_revoke_mmap(old);
old->fence = NULL;
}
list_move(&fence->link, &ggtt->fence_list);
}
/*
* We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see intel_ggtt_restore_fences().
*
* This only works for removing the fence register; on acquisition,
* the caller must hold the rpm wakeref. The fence register must
* be cleared before we can use any other fences to ensure that
* the new fences do not overlap the elided clears, confusing HW.
*/
wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
if (!wakeref) {
GEM_BUG_ON(vma);
return 0;
}
WRITE_ONCE(fence->vma, vma);
fence_write(fence);
if (vma) {
vma->fence = fence;
list_move_tail(&fence->link, &ggtt->fence_list);
}
intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
}
/**
* i915_vma_revoke_fence - force-remove fence for a VMA
* @vma: vma to map linearly (not through a fence reg)
*
* This function force-removes any fence from the given object, which is useful
* if the kernel wants to do untiled GTT access.
*/
void i915_vma_revoke_fence(struct i915_vma *vma)
{
struct i915_fence_reg *fence = vma->fence;
intel_wakeref_t wakeref;
lockdep_assert_held(&vma->vm->mutex);
if (!fence)
return;
GEM_BUG_ON(fence->vma != vma);
GEM_BUG_ON(!i915_active_is_idle(&fence->active));
GEM_BUG_ON(atomic_read(&fence->pin_count));
fence->tiling = 0;
WRITE_ONCE(fence->vma, NULL);
vma->fence = NULL;
/*
* Skip the write to HW if and only if the device is currently
* suspended.
*
* If the driver does not currently hold a wakeref (if_in_use == 0),
* the device may currently be runtime suspended, or it may be woken
* up before the suspend takes place. If the device is not suspended
* (powered down) and we skip clearing the fence register, the HW is
* left in an undefined state where we may end up with multiple
* registers overlapping.
*/
with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
fence_write(fence);
}
static bool fence_is_active(const struct i915_fence_reg *fence)
{
return fence->vma && i915_vma_is_active(fence->vma);
}
static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
{
struct i915_fence_reg *active = NULL;
struct i915_fence_reg *fence, *fn;
list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) {
GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
if (fence == active) /* now seen this fence twice */
active = ERR_PTR(-EAGAIN);
/* Prefer idle fences so we do not have to wait on the GPU */
if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) {
if (!active)
active = fence;
list_move_tail(&fence->link, &ggtt->fence_list);
continue;
}
if (atomic_read(&fence->pin_count))
continue;
return fence;
}
/* Wait for completion of pending flips which consume fences */
if (intel_has_pending_fb_unpin(ggtt->vm.i915))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-ENOBUFS);
}
int __i915_vma_pin_fence(struct i915_vma *vma)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
struct i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
lockdep_assert_held(&vma->vm->mutex);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
fence = vma->fence;
GEM_BUG_ON(fence->vma != vma);
atomic_inc(&fence->pin_count);
if (!fence->dirty) {
list_move_tail(&fence->link, &ggtt->fence_list);
return 0;
}
} else if (set) {
fence = fence_find(ggtt);
if (IS_ERR(fence))
return PTR_ERR(fence);
GEM_BUG_ON(atomic_read(&fence->pin_count));
atomic_inc(&fence->pin_count);
} else {
return 0;
}
err = fence_update(fence, set);
if (err)
goto out_unpin;
GEM_BUG_ON(fence->vma != set);
GEM_BUG_ON(vma->fence != (set ? fence : NULL));
if (set)
return 0;
out_unpin:
atomic_dec(&fence->pin_count);
return err;
}
/**
* i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
*
* For an untiled surface, this removes any existing fence.
*
* Returns:
*
* 0 on success, negative error code on failure.
*/
int i915_vma_pin_fence(struct i915_vma *vma)
{
int err;
if (!vma->fence && !i915_gem_object_is_tiled(vma->obj))
return 0;
/*
* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
err = mutex_lock_interruptible(&vma->vm->mutex);
if (err)
return err;
err = __i915_vma_pin_fence(vma);
mutex_unlock(&vma->vm->mutex);
return err;
}
/**
* i915_reserve_fence - Reserve a fence for vGPU
* @ggtt: Global GTT
*
* This function walks the fence regs looking for a free one and removes
* it from the fence_list. It is used to reserve a fence for vGPU to use.
*/
struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt)
{
struct i915_fence_reg *fence;
int count;
int ret;
lockdep_assert_held(&ggtt->vm.mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
list_for_each_entry(fence, &ggtt->fence_list, link)
count += !atomic_read(&fence->pin_count);
if (count <= 1)
return ERR_PTR(-ENOSPC);
fence = fence_find(ggtt);
if (IS_ERR(fence))
return fence;
if (fence->vma) {
/* Force-remove fence from VMA */
ret = fence_update(fence, NULL);
if (ret)
return ERR_PTR(ret);
}
list_del(&fence->link);
return fence;
}
/**
* i915_unreserve_fence - Reclaim a reserved fence
* @fence: the fence reg
*
* This function adds a reserved fence register from vGPU back to the fence_list.
*/
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
struct i915_ggtt *ggtt = fence->ggtt;
lockdep_assert_held(&ggtt->vm.mutex);
list_add(&fence->link, &ggtt->fence_list);
}
/**
* intel_ggtt_restore_fences - restore fence state
* @ggtt: Global GTT
*
* Restore the hw fence state to match the software tracking again, to be called
* after a gpu reset and on resume. Note that on runtime suspend we only cancel
* the fences, to be reacquired by the user later.
*/
void intel_ggtt_restore_fences(struct i915_ggtt *ggtt)
{
int i;
for (i = 0; i < ggtt->num_fences; i++)
fence_write(&ggtt->fence_regs[i]);
}
/**
* DOC: tiling swizzling details
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvement from doing this on the back/depth buffer is on
* the order of 30%.
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3d driver what address
* swizzling it needs to do, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
*/
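/*
 * Purely illustrative helper (not used by the driver): for the common
 * I915_BIT_6_SWIZZLE_9_10 mode described above, bit 6 of a linear offset is
 * XORed with bits 9 and 10, so a CPU-side remap of an offset looks like this.
 */
static inline u32 __maybe_unused example_swizzle_bit_6_9_10(u32 offset)
{
	u32 bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;

	return offset ^ (bit6 << 6);
}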
/**
* detect_bit_6_swizzle - detect bit 6 swizzling pattern
* @ggtt: Global GGTT
*
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct drm_i915_private *i915 = ggtt->vm.i915;
u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (GRAPHICS_VER(i915) >= 8 || IS_VALLEYVIEW(i915)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
* the extra address manipulation GPU side.
*
* VLV and CHV don't have GPU swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (GRAPHICS_VER(i915) >= 6) {
if (i915->preserve_bios_swizzle) {
if (intel_uncore_read(uncore, DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
u32 dimm_c0, dimm_c1;
dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/*
* Enable swizzling when the channels are populated
* with identically sized dimms. We don't need to check
* the 3rd channel because no cpu with gpu attached
* ships in that configuration. Also, swizzling only
* makes sense for 2 channels anyway.
*/
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (GRAPHICS_VER(i915) == 5) {
/*
* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (GRAPHICS_VER(i915) == 2) {
/*
* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
/*
* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
*    slot fill         memory size  swizzling
* 0A   0B   1A   1B    1-ch   2-ch
* 512  0    0    0     512    0     O
* 512  0    512  0     16     1008  X
* 512  0    0    512   16     1008  X
* 0    512  0    512   16     1008  X
* 1024 1024 1024 0     2048   1024  O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*
* Reports indicate that the swizzling actually
* varies depending upon page placement inside the
* channels, i.e. we see swizzled pages where the
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
if (intel_uncore_read16(uncore, C0DRB3_BW) ==
intel_uncore_read16(uncore, C1DRB3_BW)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
} else {
u32 dcc = intel_uncore_read(uncore, DCC);
/*
* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
* 9 for Y tiled. The CPU's interleave is independent, and
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
/*
* This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
swizzle_y = I915_BIT_6_SWIZZLE_9_17;
}
break;
}
/* check for L-shaped memory aka modified enhanced addressing */
if (GRAPHICS_VER(i915) == 4 &&
!(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
if (dcc == 0xffffffff) {
drm_err(&i915->drm, "Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
}
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
/*
* Userspace likes to explode if it sees unknown swizzling,
* so lie. We will finish the lie when reporting through
* the get-tiling-ioctl by reporting the physical swizzle
* mode as unknown instead.
*
* As we don't strictly know what the swizzling is, it may be
* bit17 dependent, and so we need to also prevent the pages
* from being moved.
*/
i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
to_gt(i915)->ggtt->bit_6_swizzle_x = swizzle_x;
to_gt(i915)->ggtt->bit_6_swizzle_y = swizzle_y;
}
/*
* Swap every 64 bytes of this page around, to account for it having a new
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
static void swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
int i;
vaddr = kmap(page);
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
memcpy(&vaddr[i], &vaddr[i + 64], 64);
memcpy(&vaddr[i + 64], temp, 64);
}
kunmap(page);
}
/**
* i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
* @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
*
* This function fixes up the swizzling in case any page frame number for this
* object has changed in bit 17 since that state has been saved with
* i915_gem_object_save_bit_17_swizzle().
*
* This is called when pinning backing storage again, since the kernel is free
* to move unpinned backing storage around (either by directly moving pages or
* by swapping them out and back in again).
*/
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
struct page *page;
int i;
if (obj->bit_17 == NULL)
return;
i = 0;
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
swizzle_page(page);
set_page_dirty(page);
}
i++;
}
}
/**
* i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
* @obj: i915 GEM buffer object
* @pages: the scattergather list of physical pages
*
* This function saves the bit 17 of each page frame number so that swizzling
* can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
* be called before the backing storage can be unpinned.
*/
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
struct sgt_iter sgt_iter;
struct page *page;
int i;
if (obj->bit_17 == NULL) {
obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
if (obj->bit_17 == NULL) {
drm_err(obj->base.dev,
"Failed to allocate memory for bit 17 record\n");
return;
}
}
i = 0;
for_each_sgt_page(page, sgt_iter, pages) {
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
i++;
}
}
void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
int num_fences;
int i;
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
detect_bit_6_swizzle(ggtt);
if (!i915_ggtt_has_aperture(ggtt))
num_fences = 0;
else if (GRAPHICS_VER(i915) >= 7 &&
!(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
num_fences = 32;
else if (GRAPHICS_VER(i915) >= 4 ||
IS_I945G(i915) || IS_I945GM(i915) ||
IS_G33(i915) || IS_PINEVIEW(i915))
num_fences = 16;
else
num_fences = 8;
if (intel_vgpu_active(i915))
num_fences = intel_uncore_read(uncore,
vgtif_reg(avail_rs.fence_num));
ggtt->fence_regs = kcalloc(num_fences,
sizeof(*ggtt->fence_regs),
GFP_KERNEL);
if (!ggtt->fence_regs)
num_fences = 0;
/* Initialize fence registers to zero */
for (i = 0; i < num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
i915_active_init(&fence->active, NULL, NULL, 0);
fence->ggtt = ggtt;
fence->id = i;
list_add_tail(&fence->link, &ggtt->fence_list);
}
ggtt->num_fences = num_fences;
intel_ggtt_restore_fences(ggtt);
}
void intel_ggtt_fini_fences(struct i915_ggtt *ggtt)
{
int i;
for (i = 0; i < ggtt->num_fences; i++) {
struct i915_fence_reg *fence = &ggtt->fence_regs[i];
i915_active_fini(&fence->active);
}
kfree(ggtt->fence_regs);
}
void intel_gt_init_swizzling(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
if (GRAPHICS_VER(i915) < 5 ||
to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
if (GRAPHICS_VER(i915) == 5)
return;
intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
if (GRAPHICS_VER(i915) == 6)
intel_uncore_write(uncore,
ARB_MODE,
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (GRAPHICS_VER(i915) == 7)
intel_uncore_write(uncore,
ARB_MODE,
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (GRAPHICS_VER(i915) == 8)
intel_uncore_write(uncore,
GAMTARBMODE,
_MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
MISSING_CASE(GRAPHICS_VER(i915));
}
| linux-master | drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/workqueue.h>
#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"
static bool retire_requests(struct intel_timeline *tl)
{
struct i915_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &tl->requests, link)
if (!i915_request_retire(rq))
return false;
/* And check nothing new was submitted */
return !i915_active_fence_isset(&tl->last_request);
}
static bool engine_active(const struct intel_engine_cs *engine)
{
return !list_empty(&engine->kernel_context->timeline->requests);
}
static bool flush_submission(struct intel_gt *gt, long timeout)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool active = false;
if (!timeout)
return false;
if (!intel_gt_pm_is_awake(gt))
return false;
for_each_engine(engine, gt, id) {
intel_engine_flush_submission(engine);
/* Flush the background retirement and idle barriers */
flush_work(&engine->retire_work);
flush_delayed_work(&engine->wakeref.work);
/* Is the idle barrier still outstanding? */
active |= engine_active(engine);
}
return active;
}
static void engine_retire(struct work_struct *work)
{
struct intel_engine_cs *engine =
container_of(work, typeof(*engine), retire_work);
struct intel_timeline *tl = xchg(&engine->retire, NULL);
do {
struct intel_timeline *next = xchg(&tl->retire, NULL);
/*
* Our goal here is to retire _idle_ timelines as soon as
* possible (as they are idle, we do not expect userspace
* to be cleaning up anytime soon).
*
* If the timeline is currently locked, either it is being
* retired elsewhere or about to be!
*/
if (mutex_trylock(&tl->mutex)) {
retire_requests(tl);
mutex_unlock(&tl->mutex);
}
intel_timeline_put(tl);
GEM_BUG_ON(!next);
tl = ptr_mask_bits(next, 1);
} while (tl);
}
static bool add_retire(struct intel_engine_cs *engine,
struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
struct intel_timeline *first;
/*
* We open-code a llist here to include the additional tag [BIT(0)]
* so that we know when the timeline is already on a
* retirement queue: either this engine or another.
*/
if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
return false;
intel_timeline_get(tl);
first = READ_ONCE(engine->retire);
do
tl->retire = ptr_pack_bits(first, 1, 1);
while (!try_cmpxchg(&engine->retire, &first, tl));
return !first;
}
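/*
 * Illustration of the tagged-pointer encoding used above, assuming the
 * low-bit packing helpers from i915_utils.h:
 *
 *	tl->retire = ptr_pack_bits(first, 1, 1);   -> first | BIT(0)
 *	next = ptr_mask_bits(tl->retire, 1);       -> clears BIT(0)
 *
 * Because BIT(0) is always set once a timeline is queued, even the last
 * element (whose "next" pointer is NULL) reads back as the non-NULL STUB
 * value, so the cmpxchg() in add_retire() reliably reports "already queued".
 */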
void intel_engine_add_retire(struct intel_engine_cs *engine,
struct intel_timeline *tl)
{
/* We don't deal well with the engine disappearing beneath us */
GEM_BUG_ON(intel_engine_is_virtual(engine));
if (add_retire(engine, tl))
queue_work(engine->i915->unordered_wq, &engine->retire_work);
}
void intel_engine_init_retire(struct intel_engine_cs *engine)
{
INIT_WORK(&engine->retire_work, engine_retire);
}
void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
flush_work(&engine->retire_work);
GEM_BUG_ON(engine->retire);
}
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
long *remaining_timeout)
{
struct intel_gt_timelines *timelines = >->timelines;
struct intel_timeline *tl, *tn;
unsigned long active_count = 0;
LIST_HEAD(free);
flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
if (!mutex_trylock(&tl->mutex)) {
active_count++; /* report busy to caller, try again? */
continue;
}
intel_timeline_get(tl);
GEM_BUG_ON(!atomic_read(&tl->active_count));
atomic_inc(&tl->active_count); /* pin the list element */
spin_unlock(&timelines->lock);
if (timeout > 0) {
struct dma_fence *fence;
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
mutex_unlock(&tl->mutex);
timeout = dma_fence_wait_timeout(fence,
true,
timeout);
dma_fence_put(fence);
/* Retirement is best effort */
if (!mutex_trylock(&tl->mutex)) {
active_count++;
goto out_active;
}
}
}
if (!retire_requests(tl))
active_count++;
mutex_unlock(&tl->mutex);
out_active: spin_lock(&timelines->lock);
/* Resume list iteration after reacquiring spinlock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(atomic_read(&tl->active_count));
list_add(&tl->link, &free);
}
}
spin_unlock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
if (flush_submission(gt, timeout)) /* Wait, there's more! */
active_count++;
if (remaining_timeout)
*remaining_timeout = timeout;
return active_count ? timeout ?: -ETIME : 0;
}
static void retire_work_handler(struct work_struct *work)
{
struct intel_gt *gt =
container_of(work, typeof(*gt), requests.retire_work.work);
queue_delayed_work(gt->i915->unordered_wq, >->requests.retire_work,
round_jiffies_up_relative(HZ));
intel_gt_retire_requests(gt);
}
void intel_gt_init_requests(struct intel_gt *gt)
{
INIT_DELAYED_WORK(>->requests.retire_work, retire_work_handler);
}
void intel_gt_park_requests(struct intel_gt *gt)
{
cancel_delayed_work(>->requests.retire_work);
}
void intel_gt_unpark_requests(struct intel_gt *gt)
{
queue_delayed_work(gt->i915->unordered_wq, >->requests.retire_work,
round_jiffies_up_relative(HZ));
}
void intel_gt_fini_requests(struct intel_gt *gt)
{
/* Wait until the work is marked as finished before unloading! */
cancel_delayed_work_sync(>->requests.retire_work);
flush_work(>->watchdog.work);
}
void intel_gt_watchdog_work(struct work_struct *work)
{
struct intel_gt *gt =
container_of(work, typeof(*gt), watchdog.work);
struct i915_request *rq, *rn;
struct llist_node *first;
first = llist_del_all(>->watchdog.list);
if (!first)
return;
llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
if (!i915_request_completed(rq)) {
struct dma_fence *f = &rq->fence;
pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
f->ops->get_driver_name(f),
f->ops->get_timeline_name(f),
f->seqno);
i915_request_cancel(rq, -EINTR);
}
i915_request_put(rq);
}
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_requests.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2017-2019 Intel Corporation
*/
#include "intel_wopcm.h"
#include "i915_drv.h"
/**
* DOC: WOPCM Layout
*
* The layout of the WOPCM will be fixed after writing to GuC WOPCM size and
* offset registers whose values are calculated and determined by HuC/GuC
* firmware sizes and a set of hardware requirements/restrictions, as shown below:
*
* ::
*
*    +=========> +====================+ <== WOPCM Top
*    ^           |  HW contexts RSVD  |
*    |     +===> +====================+ <== GuC WOPCM Top
*    |     ^     |                    |
*    |     |     |                    |
*    |     |     |                    |
*    |    GuC    |                    |
*    |   WOPCM   |                    |
*    |    Size   +--------------------+
*  WOPCM   |     |    GuC FW RSVD     |
*    |     |     +--------------------+
*    |     |     |   GuC Stack RSVD   |
*    |     |     +------------------- +
*    |     v     |   GuC WOPCM RSVD   |
*    |     +===> +====================+ <== GuC WOPCM base
*    |           |     WOPCM RSVD     |
*    |           +------------------- + <== HuC Firmware Top
*    v           |      HuC FW        |
*    +=========> +====================+ <== WOPCM Base
*
* GuC accessible WOPCM starts at GuC WOPCM base and ends at GuC WOPCM top.
* The top part of the WOPCM is reserved for hardware contexts (e.g. RC6
* context).
*/
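/*
 * Worked example of the partitioning performed in intel_wopcm_init() below
 * (the numbers are purely illustrative, not taken from any real firmware):
 * on a Gen11+ part with a 2MB WOPCM, a 512K HuC image and the 36K hardware
 * context reservation, the GuC WOPCM base becomes
 * ALIGN(512K + 16K, 16K) = 528K, leaving 2048K - 36K - 528K = 1484K for the
 * GuC region, which __check_layout() then verifies against the GuC firmware
 * size plus its 16K + 8K reserved areas.
 */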
/* Default WOPCM size is 2MB from Gen11, 1MB on previous platforms */
#define GEN11_WOPCM_SIZE SZ_2M
#define GEN9_WOPCM_SIZE SZ_1M
#define MAX_WOPCM_SIZE SZ_8M
/* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. */
#define WOPCM_RESERVED_SIZE SZ_16K
/* 16KB reserved at the beginning of GuC WOPCM. */
#define GUC_WOPCM_RESERVED SZ_16K
/* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */
#define GUC_WOPCM_STACK_RESERVED SZ_8K
/* GuC WOPCM Offset value needs to be aligned to 16KB. */
#define GUC_WOPCM_OFFSET_ALIGNMENT (1UL << GUC_WOPCM_OFFSET_SHIFT)
/* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
#define BXT_WOPCM_RC6_CTX_RESERVED (SZ_16K + SZ_8K)
/* 36KB WOPCM reserved at the end of WOPCM on ICL. */
#define ICL_WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K)
/* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
#define GEN9_GUC_FW_RESERVED SZ_128K
#define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED)
static inline struct intel_gt *wopcm_to_gt(struct intel_wopcm *wopcm)
{
return container_of(wopcm, struct intel_gt, wopcm);
}
/**
* intel_wopcm_init_early() - Early initialization of the WOPCM.
* @wopcm: pointer to intel_wopcm.
*
* Set up the size of the WOPCM, which will be used later on for WOPCM partitioning.
*/
void intel_wopcm_init_early(struct intel_wopcm *wopcm)
{
struct intel_gt *gt = wopcm_to_gt(wopcm);
struct drm_i915_private *i915 = gt->i915;
if (!HAS_GT_UC(i915))
return;
if (GRAPHICS_VER(i915) >= 11)
wopcm->size = GEN11_WOPCM_SIZE;
else
wopcm->size = GEN9_WOPCM_SIZE;
drm_dbg(&i915->drm, "WOPCM: %uK\n", wopcm->size / 1024);
}
static u32 context_reserved_size(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915))
return BXT_WOPCM_RC6_CTX_RESERVED;
else if (GRAPHICS_VER(i915) >= 11)
return ICL_WOPCM_HW_CTX_RESERVED;
else
return 0;
}
static bool gen9_check_dword_gap(struct drm_i915_private *i915,
u32 guc_wopcm_base, u32 guc_wopcm_size)
{
u32 offset;
/*
* GuC WOPCM size shall be at least a dword larger than the offset from
* WOPCM base (GuC WOPCM offset from WOPCM base + GEN9_GUC_WOPCM_OFFSET)
* due to hardware limitation on Gen9.
*/
offset = guc_wopcm_base + GEN9_GUC_WOPCM_OFFSET;
if (offset > guc_wopcm_size ||
(guc_wopcm_size - offset) < sizeof(u32)) {
drm_err(&i915->drm,
"WOPCM: invalid GuC region size: %uK < %uK\n",
guc_wopcm_size / SZ_1K,
(u32)(offset + sizeof(u32)) / SZ_1K);
return false;
}
return true;
}
static bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
u32 guc_wopcm_size, u32 huc_fw_size)
{
/*
* On Gen9, hardware requires the total available GuC WOPCM
* size to be larger than or equal to HuC firmware size. Otherwise,
* firmware uploading would fail.
*/
if (huc_fw_size > guc_wopcm_size - GUC_WOPCM_RESERVED) {
drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
(guc_wopcm_size - GUC_WOPCM_RESERVED) / SZ_1K,
huc_fw_size / 1024);
return false;
}
return true;
}
static bool check_hw_restrictions(struct drm_i915_private *i915,
u32 guc_wopcm_base, u32 guc_wopcm_size,
u32 huc_fw_size)
{
if (GRAPHICS_VER(i915) == 9 && !gen9_check_dword_gap(i915, guc_wopcm_base,
guc_wopcm_size))
return false;
if (GRAPHICS_VER(i915) == 9 &&
!gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
return false;
return true;
}
static bool __check_layout(struct intel_gt *gt, u32 wopcm_size,
u32 guc_wopcm_base, u32 guc_wopcm_size,
u32 guc_fw_size, u32 huc_fw_size)
{
struct drm_i915_private *i915 = gt->i915;
const u32 ctx_rsvd = context_reserved_size(i915);
u32 size;
size = wopcm_size - ctx_rsvd;
if (unlikely(range_overflows(guc_wopcm_base, guc_wopcm_size, size))) {
drm_err(&i915->drm,
"WOPCM: invalid GuC region layout: %uK + %uK > %uK\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K,
size / SZ_1K);
return false;
}
size = guc_fw_size + GUC_WOPCM_RESERVED + GUC_WOPCM_STACK_RESERVED;
if (unlikely(guc_wopcm_size < size)) {
drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC),
guc_wopcm_size / SZ_1K, size / SZ_1K);
return false;
}
if (intel_uc_supports_huc(>->uc)) {
size = huc_fw_size + WOPCM_RESERVED_SIZE;
if (unlikely(guc_wopcm_base < size)) {
drm_err(&i915->drm, "WOPCM: no space for %s: %uK < %uK\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
guc_wopcm_base / SZ_1K, size / SZ_1K);
return false;
}
}
return check_hw_restrictions(i915, guc_wopcm_base, guc_wopcm_size,
huc_fw_size);
}
static bool __wopcm_regs_locked(struct intel_uncore *uncore,
u32 *guc_wopcm_base, u32 *guc_wopcm_size)
{
u32 reg_base = intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET);
u32 reg_size = intel_uncore_read(uncore, GUC_WOPCM_SIZE);
if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
!(reg_base & GUC_WOPCM_OFFSET_VALID))
return false;
*guc_wopcm_base = reg_base & GUC_WOPCM_OFFSET_MASK;
*guc_wopcm_size = reg_size & GUC_WOPCM_SIZE_MASK;
return true;
}
static bool __wopcm_regs_writable(struct intel_uncore *uncore)
{
if (!HAS_GUC_DEPRIVILEGE(uncore->i915))
return true;
return intel_uncore_read(uncore, GUC_SHIM_CONTROL2) & GUC_IS_PRIVILEGED;
}
/**
* intel_wopcm_init() - Initialize the WOPCM structure.
* @wopcm: pointer to intel_wopcm.
*
* This function will partition the WOPCM space based on the GuC and HuC firmware
* sizes and will allocate the maximum remaining space for use by GuC. It will also
* enforce platform-dependent hardware restrictions on the GuC WOPCM offset and
* size. It will fail the WOPCM init if any of these checks fail, so that the
* subsequent WOPCM register setup and GuC firmware upload are aborted.
*/
void intel_wopcm_init(struct intel_wopcm *wopcm)
{
struct intel_gt *gt = wopcm_to_gt(wopcm);
struct drm_i915_private *i915 = gt->i915;
u32 guc_fw_size = intel_uc_fw_get_upload_size(>->uc.guc.fw);
u32 huc_fw_size = intel_uc_fw_get_upload_size(>->uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
u32 wopcm_size = wopcm->size;
u32 guc_wopcm_base;
u32 guc_wopcm_size;
if (!guc_fw_size)
return;
GEM_BUG_ON(!wopcm_size);
GEM_BUG_ON(wopcm->guc.base);
GEM_BUG_ON(wopcm->guc.size);
GEM_BUG_ON(guc_fw_size >= wopcm_size);
GEM_BUG_ON(huc_fw_size >= wopcm_size);
GEM_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm_size);
if (i915_inject_probe_failure(i915))
return;
if (__wopcm_regs_locked(gt->uncore, &guc_wopcm_base, &guc_wopcm_size)) {
drm_dbg(&i915->drm, "GuC WOPCM is already locked [%uK, %uK)\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
/*
* Note that to keep things simple (i.e. avoid different
* defines per platform) our WOPCM math doesn't always use the
* actual WOPCM size, but a value that is less or equal to it.
* This is perfectly fine when i915 programs the registers, but
* on platforms with GuC deprivilege the registers are not
* writable from i915 and are instead pre-programmed by the
* bios/IFWI, so there might be a mismatch of sizes.
* Instead of handling the size difference, we trust that the
* programmed values make sense and disable the relevant check
* by using the maximum possible WOPCM size in the verification
* math. In the extremely unlikely case that the registers
* were pre-programmed with an invalid value, we will still
* gracefully fail later during the GuC/HuC dma.
*/
if (!__wopcm_regs_writable(gt->uncore))
wopcm_size = MAX_WOPCM_SIZE;
goto check;
}
/*
* On platforms with a media GT, the WOPCM is partitioned between the
* two GTs, so we would have to take that into account when doing the
* math below. There is also a new section reserved for the GSC context
* that would have to be factored in. However, all platforms with a
* media GT also have GuC depriv enabled, so the WOPCM regs are
* pre-locked and therefore we don't have to do the math ourselves.
*/
if (unlikely(i915->media_gt)) {
drm_err(&i915->drm, "Unlocked WOPCM regs with media GT\n");
return;
}
/*
* Aligned value of guc_wopcm_base will determine available WOPCM space
* for HuC firmware and mandatory reserved area.
*/
guc_wopcm_base = huc_fw_size + WOPCM_RESERVED_SIZE;
guc_wopcm_base = ALIGN(guc_wopcm_base, GUC_WOPCM_OFFSET_ALIGNMENT);
/*
* Need to clamp guc_wopcm_base now to make sure the following math is
* correct. Formal check of whole WOPCM layout will be done below.
*/
guc_wopcm_base = min(guc_wopcm_base, wopcm_size - ctx_rsvd);
/* The aligned remainder of the usable WOPCM space can be assigned to GuC. */
guc_wopcm_size = wopcm_size - ctx_rsvd - guc_wopcm_base;
guc_wopcm_size &= GUC_WOPCM_SIZE_MASK;
drm_dbg(&i915->drm, "Calculated GuC WOPCM [%uK, %uK)\n",
guc_wopcm_base / SZ_1K, guc_wopcm_size / SZ_1K);
check:
if (__check_layout(gt, wopcm_size, guc_wopcm_base, guc_wopcm_size,
guc_fw_size, huc_fw_size)) {
wopcm->guc.base = guc_wopcm_base;
wopcm->guc.size = guc_wopcm_size;
GEM_BUG_ON(!wopcm->guc.base);
GEM_BUG_ON(!wopcm->guc.size);
}
}
| linux-master | drivers/gpu/drm/i915/gt/intel_wopcm.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*/
/**
* DOC: Logical Rings, Logical Ring Contexts and Execlists
*
* Motivation:
* GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
* These expanded contexts enable a number of new abilities, especially
* "Execlists" (also implemented in this file).
*
* One of the main differences with the legacy HW contexts is that logical
* ring contexts incorporate many more things to the context's state, like
* PDPs or ringbuffer control registers:
*
* The reason why PDPs are included in the context is straightforward: as
* PPGTTs (per-process GTTs) are actually per-context, having the PDPs
* contained there means you don't need to do a ppgtt->switch_mm yourself;
* instead, the GPU will do it for you on the context switch.
*
* But, what about the ringbuffer control registers (head, tail, etc..)?
* shouldn't we just need a set of those per engine command streamer? This is
* where the name "Logical Rings" starts to make sense: by virtualizing the
* rings, the engine cs shifts to a new "ring buffer" with every context
* switch. When you want to submit a workload to the GPU you: A) choose your
* context, B) find its appropriate virtualized ring, C) write commands to it
* and then, finally, D) tell the GPU to switch to that context.
*
* Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
* to a context is via a context execution list, ergo "Execlists".
*
* LRC implementation:
* Regarding the creation of contexts, we have:
*
* - One global default context.
* - One local default context for each opened fd.
* - One local extra context for each context create ioctl call.
*
* Now that ringbuffers belong per-context (and not per-engine, like before)
* and that contexts are uniquely tied to a given engine (and not reusable,
* like before) we need:
*
* - One ringbuffer per-engine inside each context.
* - One backing object per-engine inside each context.
*
* The global default context starts its life with these new objects fully
* allocated and populated. The local default context for each opened fd is
* more complex, because we don't know at creation time which engine is going
* to use them. To handle this, we have implemented a deferred creation of LR
* contexts:
*
* The local context starts its life as a hollow or blank holder, that only
* gets populated for a given engine once we receive an execbuffer. If later
* on we receive another execbuffer ioctl for the same context but a different
* engine, we allocate/populate a new ringbuffer and context backing object and
* so on.
*
* Finally, regarding local contexts created using the ioctl call: as they are
* only allowed with the render ring, we can allocate & populate them right
* away (no need to defer anything, at least for now).
*
* Execlists implementation:
* Execlists are the new method by which, on gen8+ hardware, workloads are
* submitted for execution (as opposed to the legacy, ringbuffer-based, method).
* This method works as follows:
*
* When a request is committed, its commands (the BB start and any leading or
* trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
* for the appropriate context. The tail pointer in the hardware context is not
* updated at this time, but instead, kept by the driver in the ringbuffer
* structure. A structure representing this request is added to a request queue
* for the appropriate engine: this structure contains a copy of the context's
* tail after the request was written to the ring buffer and a pointer to the
* context itself.
*
* If the engine's request queue was empty before the request was added, the
* queue is processed immediately. Otherwise the queue will be processed during
* a context switch interrupt. In any case, elements on the queue will get sent
* (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
* globally unique 20-bit submission ID.
*
* When execution of a request completes, the GPU updates the context status
* buffer with a context complete event and generates a context switch interrupt.
* During the interrupt handling, the driver examines the events in the buffer:
* for each context complete event, if the announced ID matches that on the head
* of the request queue, then that request is retired and removed from the queue.
*
* After processing, if any requests were retired and the queue is not empty
* then a new execution list can be submitted. The two requests at the front of
* the queue are next to be submitted but since a context may not occur twice in
* an execution list, if subsequent requests have the same ID as the first then
* the two requests must be combined. This is done simply by discarding requests
* at the head of the queue until either only one request is left (in which case
* we use a NULL second context) or the first two requests have unique IDs.
*
* By always executing the first two requests in the queue the driver ensures
* that the GPU is kept as busy as possible. In the case where a single context
* completes but a second context is still executing, the request for this second
* context will be at the head of the queue when we remove the first one. This
* request will then be resubmitted along with a new request for a different context,
* which will cause the hardware to continue executing the second request and queue
* the new request (the GPU detects the condition of a context getting preempted
* with the same context and optimizes the context switch flow by not doing
* preemption, but just sampling the new tail pointer).
*
*/
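/*
 * For illustration of the pairing rule described above (a sketch only): with
 * a queue of A1, A2 (context A) and B1 (context B), a context may not appear
 * twice in one execution list, so A1 is discarded from the head in favour of
 * A2 (executing up to A2's tail also covers A1) and the pair {A2, B1} is
 * written to the ELSP. Had the queue held only A1 and A2, the submission
 * would be {A2, NULL}.
 */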
#include <linux/interrupt.h>
#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "gen8_engine_cs.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_engine_stats.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
#include "shmem_utils.h"
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
#define RING_EXECLIST1_ACTIVE (1 << 0x11)
#define RING_EXECLIST0_ACTIVE (1 << 0x12)
#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
#define GEN8_CTX_STATUS_COMPLETED_MASK \
(GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */
#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */
#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15)
#define GEN12_IDLE_CTX_ID 0x7FF
#define GEN12_CSB_CTX_VALID(csb_dw) \
(FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
#define XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE BIT(1) /* upper csb dword */
#define XEHP_CSB_SW_CTX_ID_MASK GENMASK(31, 10)
#define XEHP_IDLE_CTX_ID 0xFFFF
#define XEHP_CSB_CTX_VALID(csb_dw) \
(FIELD_GET(XEHP_CSB_SW_CTX_ID_MASK, csb_dw) != XEHP_IDLE_CTX_ID)
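/*
 * For example, a lower csb dword whose bits 25:15 are all set decodes to a
 * sw context id of 0x7FF == GEN12_IDLE_CTX_ID, so GEN12_CSB_CTX_VALID()
 * evaluates to false (the slot is idle); any other id marks the slot as
 * holding a valid context. The XEHP variant follows the same pattern with
 * its own field and idle id.
 */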
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
struct virtual_engine {
struct intel_engine_cs base;
struct intel_context context;
struct rcu_work rcu;
/*
* We allow only a single request through the virtual engine at a time
* (each request in the timeline waits for the completion fence of
* the previous before being submitted). By restricting ourselves to
* only submitting a single request, each request is placed on to a
	 * physical engine to maximise load spreading (by virtue of the late greedy
* scheduling -- each real engine takes the next available request
* upon idling).
*/
struct i915_request *request;
/*
* We keep a rbtree of available virtual engines inside each physical
* engine, sorted by priority. Here we preallocate the nodes we need
* for the virtual engine, indexed by physical_engine->id.
*/
struct ve_node {
struct rb_node rb;
int prio;
} nodes[I915_NUM_ENGINES];
/* And finally, which physical engines this virtual engine maps onto. */
unsigned int num_siblings;
struct intel_engine_cs *siblings[];
};
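/*
 * Roughly, the intended flow: the virtual engine's submission tasklet inserts
 * one of the preallocated ve_nodes into each sibling's execlists->virtual
 * rbtree, keyed by the request priority. Whichever physical engine dequeues
 * first claims the request and erases its node (see execlists_dequeue()),
 * while the other siblings lazily discard their now-stale nodes the next time
 * they scan the tree (see first_virtual_engine()).
 */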
static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
{
GEM_BUG_ON(!intel_engine_is_virtual(engine));
return container_of(engine, struct virtual_engine, base);
}
static struct intel_context *
execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned long flags);
static struct i915_request *
__active_request(const struct intel_timeline * const tl,
struct i915_request *rq,
int error)
{
struct i915_request *active = rq;
list_for_each_entry_from_reverse(rq, &tl->requests, link) {
if (__i915_request_is_complete(rq))
break;
if (error) {
i915_request_set_error_once(rq, error);
__i915_request_skip(rq);
}
active = rq;
}
return active;
}
static struct i915_request *
active_request(const struct intel_timeline * const tl, struct i915_request *rq)
{
return __active_request(tl, rq, 0);
}
static void ring_set_paused(const struct intel_engine_cs *engine, int state)
{
/*
* We inspect HWS_PREEMPT with a semaphore inside
* engine->emit_fini_breadcrumb. If the dword is true,
* the ring is paused as the semaphore will busywait
* until the dword is false.
*/
engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
if (state)
wmb();
}
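/*
 * For example, execlists_dequeue() below pauses the ring before unwinding
 * requests for a preemption or an expired timeslice, and the pause is lifted
 * (state 0) either once the promotion is acknowledged in process_csb() or
 * when we decide not to poke the HW after all.
 */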
static struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
}
static int rq_prio(const struct i915_request *rq)
{
return READ_ONCE(rq->sched.attr.priority);
}
static int effective_prio(const struct i915_request *rq)
{
int prio = rq_prio(rq);
/*
* If this request is special and must not be interrupted at any
* cost, so be it. Note we are only checking the most recent request
* in the context and so may be masking an earlier vip request. It
* is hoped that under the conditions where nopreempt is used, this
* will not matter (i.e. all requests to that context will be
* nopreempt for as long as desired).
*/
if (i915_request_has_nopreempt(rq))
prio = I915_PRIORITY_UNPREEMPTABLE;
return prio;
}
static int queue_prio(const struct i915_sched_engine *sched_engine)
{
struct rb_node *rb;
rb = rb_first_cached(&sched_engine->queue);
if (!rb)
return INT_MIN;
return to_priolist(rb)->priority;
}
static int virtual_prio(const struct intel_engine_execlists *el)
{
struct rb_node *rb = rb_first_cached(&el->virtual);
return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
}
static bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
int last_prio;
if (!intel_engine_has_semaphores(engine))
return false;
/*
* Check if the current priority hint merits a preemption attempt.
*
* We record the highest value priority we saw during rescheduling
* prior to this dequeue, therefore we know that if it is strictly
	 * less than the current tail of ELSP[0], we do not need to force
* a preempt-to-idle cycle.
*
* However, the priority hint is a mere hint that we may need to
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
*
* More naturally we would write
* prio >= max(0, last);
* except that we wish to prevent triggering preemption at the same
* priority level: the task that is running should remain running
* to preserve FIFO ordering of dependencies.
*/
last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
if (engine->sched_engine->queue_priority_hint <= last_prio)
return false;
/*
* Check against the first request in ELSP[1], it will, thanks to the
* power of PI, be the highest priority of that context.
*/
if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
rq_prio(list_next_entry(rq, sched.link)) > last_prio)
return true;
/*
* If the inflight context did not trigger the preemption, then maybe
* it was the set of queued requests? Pick the highest priority in
* the queue (the first active priolist) and see if it deserves to be
* running instead of ELSP[0].
*
	 * The highest priority request in the queue cannot be either
	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
	 * context, its priority would not exceed ELSP[0] aka last_prio.
*/
return max(virtual_prio(&engine->execlists),
queue_prio(engine->sched_engine)) > last_prio;
}
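/*
 * A worked example, assuming I915_PRIORITY_NORMAL is 0: with a normal
 * priority request on ELSP[0], last_prio = max(0, -1) = 0, so a stale or
 * equal priority hint (<= 0) never forces a preempt-to-idle cycle, while a
 * hint above 0 lets the ELSP[1]/queue checks above decide whether anything
 * genuinely deserves to displace the running context.
 */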
__maybe_unused static bool
assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
/*
* Without preemption, the prev may refer to the still active element
* which we refuse to let go.
*
* Even with preemption, there are times when we think it is better not
* to preempt and leave an ostensibly lower priority request in flight.
*/
if (i915_request_is_active(prev))
return true;
return rq_prio(prev) >= rq_prio(next);
}
static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *pl;
int prio = I915_PRIORITY_INVALID;
lockdep_assert_held(&engine->sched_engine->lock);
list_for_each_entry_safe_reverse(rq, rn,
&engine->sched_engine->requests,
sched.link) {
if (__i915_request_is_complete(rq)) {
list_del_init(&rq->sched.link);
continue;
}
__i915_request_unsubmit(rq);
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
pl = i915_sched_lookup_priolist(engine->sched_engine,
prio);
}
GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
list_move(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
/* Check in case we rollback so far we wrap [size/2] */
if (intel_ring_direction(rq->ring,
rq->tail,
rq->ring->tail + 8) > 0)
rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
active = rq;
}
return active;
}
struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
return __unwind_incomplete_requests(engine);
}
static void
execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
/*
	 * This is currently only used when GVT-g is enabled. When GVT-g is
	 * disabled, the compiler should eliminate this function as dead code.
*/
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
return;
atomic_notifier_call_chain(&rq->engine->context_status_notifier,
status, rq);
}
static void reset_active(struct i915_request *rq,
struct intel_engine_cs *engine)
{
struct intel_context * const ce = rq->context;
u32 head;
/*
* The executing context has been cancelled. We want to prevent
* further execution along this context and propagate the error on
* to anything depending on its results.
*
* In __i915_request_submit(), we apply the -EIO and remove the
* requests' payloads for any banned requests. But first, we must
* rewind the context back to the start of the incomplete request so
* that we do not jump back into the middle of the batch.
*
* We preserve the breadcrumbs and semaphores of the incomplete
	 * requests so that inter-timeline dependencies (i.e. other timelines)
* remain correctly ordered. And we defer to __i915_request_submit()
* so that all asynchronous waits are correctly handled.
*/
ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
rq->fence.context, rq->fence.seqno);
/* On resubmission of the active request, payload will be scrubbed */
if (__i915_request_is_complete(rq))
head = rq->tail;
else
head = __active_request(ce->timeline, rq, -EIO)->head;
head = intel_ring_wrap(ce->ring, head);
/* Scrub the context image to prevent replaying the previous batch */
lrc_init_regs(ce, engine, true);
/* We've switched away, so this should be a no-op, but intent matters */
ce->lrc.lrca = lrc_update_regs(ce, engine, head);
}
static bool bad_request(const struct i915_request *rq)
{
return rq->fence.error && i915_request_started(rq);
}
static struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
struct intel_engine_cs * const engine = rq->engine;
struct intel_context * const ce = rq->context;
intel_context_get(ce);
if (unlikely(intel_context_is_closed(ce) &&
!intel_engine_has_heartbeat(engine)))
intel_context_set_exiting(ce);
if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq)))
reset_active(rq, engine);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
lrc_check_regs(ce, engine, "before");
if (ce->tag) {
/* Use a fixed tag for OA and friends */
GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
ce->lrc.ccid = ce->tag;
} else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
/* We don't need a strict matching tag, just different values */
unsigned int tag = ffs(READ_ONCE(engine->context_tag));
GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
clear_bit(tag - 1, &engine->context_tag);
ce->lrc.ccid = tag << (XEHP_SW_CTX_ID_SHIFT - 32);
BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
} else {
/* We don't need a strict matching tag, just different values */
unsigned int tag = __ffs(engine->context_tag);
GEM_BUG_ON(tag >= BITS_PER_LONG);
__clear_bit(tag, &engine->context_tag);
ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
}
ce->lrc.ccid |= engine->execlists.ccid;
__intel_gt_pm_get(engine->gt);
if (engine->fw_domain && !engine->fw_active++)
intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(engine);
CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
return engine;
}
static void execlists_schedule_in(struct i915_request *rq, int idx)
{
struct intel_context * const ce = rq->context;
struct intel_engine_cs *old;
GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
trace_i915_request_in(rq, idx);
old = ce->inflight;
if (!old)
old = __execlists_schedule_in(rq);
WRITE_ONCE(ce->inflight, ptr_inc(old));
GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
}
static void
resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
{
struct intel_engine_cs *engine = rq->engine;
spin_lock_irq(&engine->sched_engine->lock);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
WRITE_ONCE(rq->engine, &ve->base);
ve->base.submit_request(rq);
spin_unlock_irq(&engine->sched_engine->lock);
}
static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
struct intel_engine_cs *engine = rq->engine;
/*
* After this point, the rq may be transferred to a new sibling, so
* before we clear ce->inflight make sure that the context has been
* removed from the b->signalers and furthermore we need to make sure
* that the concurrent iterator in signal_irq_work is no longer
* following ce->signal_link.
*/
if (!list_empty(&ce->signals))
intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
/*
* This engine is now too busy to run this virtual request, so
* see if we can find an alternative engine for it to execute on.
* Once a request has become bonded to this engine, we treat it the
	 * same as any other native request.
*/
if (i915_request_in_priority_queue(rq) &&
rq->execution_mask != engine->mask)
resubmit_virtual_request(rq, ve);
if (READ_ONCE(ve->request))
tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
}
static void __execlists_schedule_out(struct i915_request * const rq,
struct intel_context * const ce)
{
struct intel_engine_cs * const engine = rq->engine;
unsigned int ccid;
/*
* NB process_csb() is not under the engine->sched_engine->lock and hence
* schedule_out can race with schedule_in meaning that we should
* refrain from doing non-trivial work here.
*/
CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
GEM_BUG_ON(ce->inflight != engine);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
lrc_check_regs(ce, engine, "after");
/*
* If we have just completed this context, the engine may now be
* idle and we want to re-enter powersaving.
*/
if (intel_timeline_is_last(ce->timeline, rq) &&
__i915_request_is_complete(rq))
intel_engine_add_retire(engine, ce->timeline);
ccid = ce->lrc.ccid;
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
ccid &= XEHP_MAX_CONTEXT_HW_ID;
} else {
ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
ccid &= GEN12_MAX_CONTEXT_HW_ID;
}
if (ccid < BITS_PER_LONG) {
GEM_BUG_ON(ccid == 0);
GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
__set_bit(ccid - 1, &engine->context_tag);
}
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
intel_gt_pm_put_async(engine->gt);
/*
* If this is part of a virtual engine, its next request may
* have been blocked waiting for access to the active context.
* We have to kick all the siblings again in case we need to
* switch (e.g. the next request is not runnable on this
* engine). Hopefully, we will already have submitted the next
* request before the tasklet runs and do not need to rebuild
* each virtual tree and kick everyone again.
*/
if (ce->engine != engine)
kick_siblings(rq, ce);
WRITE_ONCE(ce->inflight, NULL);
intel_context_put(ce);
}
static inline void execlists_schedule_out(struct i915_request *rq)
{
struct intel_context * const ce = rq->context;
trace_i915_request_out(rq);
GEM_BUG_ON(!ce->inflight);
ce->inflight = ptr_dec(ce->inflight);
if (!__intel_context_inflight_count(ce->inflight))
__execlists_schedule_out(rq, ce);
i915_request_put(rq);
}
static u32 map_i915_prio_to_lrc_desc_prio(int prio)
{
if (prio > I915_PRIORITY_NORMAL)
return GEN12_CTX_PRIORITY_HIGH;
else if (prio < I915_PRIORITY_NORMAL)
return GEN12_CTX_PRIORITY_LOW;
else
return GEN12_CTX_PRIORITY_NORMAL;
}
static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
u64 desc;
u32 tail, prev;
desc = ce->lrc.desc;
if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
/*
* WaIdleLiteRestore:bdw,skl
*
* We should never submit the context with the same RING_TAIL twice
* just in case we submit an empty ring, which confuses the HW.
*
* We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
* the normal request to be able to always advance the RING_TAIL on
* subsequent resubmissions (for lite restore). Should that fail us,
* and we try and submit the same tail again, force the context
* reload.
*
* If we need to return to a preempted context, we need to skip the
* lite-restore and force it to reload the RING_TAIL. Otherwise, the
* HW has a tendency to ignore us rewinding the TAIL to the end of
* an earlier request.
*/
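	/*
	 * Concretely: if intel_ring_direction() below reports that the tail we
	 * are about to write would not advance past the previously submitted
	 * tail (e.g. after rewinding for preemption), we set
	 * CTX_DESC_FORCE_RESTORE so the HW reloads the context image instead
	 * of attempting a lite-restore with a non-advancing RING_TAIL.
	 */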
GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
prev = rq->ring->tail;
tail = intel_ring_set_tail(rq->ring, rq->tail);
if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
desc |= CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state[CTX_RING_TAIL] = tail;
rq->tail = rq->wa_tail;
/*
* Make sure the context image is complete before we submit it to HW.
*
* Ostensibly, writes (including the WCB) should be flushed prior to
	 * an uncached write such as our mmio register access. However, the
	 * empirical evidence (esp. on Braswell) suggests that the WC write into
	 * memory may not be visible to the HW prior to the completion of the UC
	 * register write, and that we may begin execution from the context
	 * before its image is complete, leading to invalid PD chasing.
*/
wmb();
ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
return desc;
}
static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
if (execlists->ctrl_reg) {
writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
} else {
writel(upper_32_bits(desc), execlists->submit_reg);
writel(lower_32_bits(desc), execlists->submit_reg);
}
}
static __maybe_unused char *
dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
{
if (!rq)
return "";
snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
prefix,
rq->context->lrc.ccid,
rq->fence.context, rq->fence.seqno,
__i915_request_is_complete(rq) ? "!" :
__i915_request_has_started(rq) ? "*" :
"",
rq_prio(rq));
return buf;
}
static __maybe_unused noinline void
trace_ports(const struct intel_engine_execlists *execlists,
const char *msg,
struct i915_request * const *ports)
{
const struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
char __maybe_unused p0[40], p1[40];
if (!ports[0])
return;
ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
dump_port(p0, sizeof(p0), "", ports[0]),
dump_port(p1, sizeof(p1), ", ", ports[1]));
}
static bool
reset_in_progress(const struct intel_engine_cs *engine)
{
return unlikely(!__tasklet_is_enabled(&engine->sched_engine->tasklet));
}
static __maybe_unused noinline bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
const char *msg)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
struct i915_request * const *port, *rq, *prev = NULL;
struct intel_context *ce = NULL;
u32 ccid = -1;
trace_ports(execlists, msg, execlists->pending);
/* We may be messing around with the lists during reset, lalala */
if (reset_in_progress(engine))
return true;
if (!execlists->pending[0]) {
GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
engine->name);
return false;
}
if (execlists->pending[execlists_num_ports(execlists)]) {
GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
engine->name, execlists_num_ports(execlists));
return false;
}
for (port = execlists->pending; (rq = *port); port++) {
unsigned long flags;
bool ok = true;
GEM_BUG_ON(!kref_read(&rq->fence.refcount));
GEM_BUG_ON(!i915_request_is_active(rq));
if (ce == rq->context) {
GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
ce = rq->context;
if (ccid == ce->lrc.ccid) {
GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
engine->name,
ccid, ce->timeline->fence_context,
port - execlists->pending);
return false;
}
ccid = ce->lrc.ccid;
/*
* Sentinels are supposed to be the last request so they flush
* the current execution off the HW. Check that they are the only
* request in the pending submission.
*
* NB: Due to the async nature of preempt-to-busy and request
		 * cancellation we need to handle the case where a request
* becomes a sentinel in parallel to CSB processing.
*/
if (prev && i915_request_has_sentinel(prev) &&
!READ_ONCE(prev->fence.error)) {
GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
prev = rq;
/*
* We want virtual requests to only be in the first slot so
* that they are never stuck behind a hog and can be immediately
* transferred onto the next idle engine.
*/
if (rq->execution_mask != engine->mask &&
port != execlists->pending) {
GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
return false;
}
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
if (__i915_request_is_complete(rq))
goto unlock;
if (i915_active_is_idle(&ce->active) &&
!intel_context_is_barrier(ce)) {
GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
goto unlock;
}
if (!i915_vma_is_pinned(ce->state)) {
GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
goto unlock;
}
if (!i915_vma_is_pinned(ce->ring->vma)) {
GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
engine->name,
ce->timeline->fence_context,
port - execlists->pending);
ok = false;
goto unlock;
}
unlock:
spin_unlock_irqrestore(&rq->lock, flags);
if (!ok)
return false;
}
return ce;
}
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
unsigned int n;
GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
/*
* We can skip acquiring intel_runtime_pm_get() here as it was taken
* on our behalf by the request (see i915_gem_mark_busy()) and it will
* not be relinquished until the device is idle (see
* i915_gem_idle_work_handler()). As a precaution, we make sure
* that all ELSP are drained i.e. we have processed the CSB,
* before allowing ourselves to idle and calling intel_runtime_pm_put().
*/
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
/*
* ELSQ note: the submit queue is not cleared after being submitted
* to the HW so we need to make sure we always clean it up. This is
* currently ensured by the fact that we always write the same number
* of elsq entries, keep this in mind before changing the loop below.
*/
for (n = execlists_num_ports(execlists); n--; ) {
struct i915_request *rq = execlists->pending[n];
write_desc(execlists,
rq ? execlists_update_context(rq) : 0,
n);
}
/* we need to manually load the submit queue */
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
}
static bool ctx_single_port_submission(const struct intel_context *ce)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
intel_context_force_single_submission(ce));
}
static bool can_merge_ctx(const struct intel_context *prev,
const struct intel_context *next)
{
if (prev != next)
return false;
if (ctx_single_port_submission(prev))
return false;
return true;
}
static unsigned long i915_request_flags(const struct i915_request *rq)
{
return READ_ONCE(rq->fence.flags);
}
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
GEM_BUG_ON(prev == next);
GEM_BUG_ON(!assert_priority_queue(prev, next));
/*
* We do not submit known completed requests. Therefore if the next
* request is already completed, we can pretend to merge it in
* with the previous context (and we will skip updating the ELSP
* and tracking). Thus hopefully keeping the ELSP full with active
* contexts, despite the best efforts of preempt-to-busy to confuse
* us.
*/
if (__i915_request_is_complete(next))
return true;
if (unlikely((i915_request_flags(prev) | i915_request_flags(next)) &
(BIT(I915_FENCE_FLAG_NOPREEMPT) |
BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
if (!can_merge_ctx(prev->context, next->context))
return false;
GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
return true;
}
static bool virtual_matches(const struct virtual_engine *ve,
const struct i915_request *rq,
const struct intel_engine_cs *engine)
{
const struct intel_engine_cs *inflight;
if (!rq)
return false;
if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
return false;
/*
* We track when the HW has completed saving the context image
* (i.e. when we have seen the final CS event switching out of
* the context) and must not overwrite the context image before
* then. This restricts us to only using the active engine
* while the previous virtualized request is inflight (so
* we reuse the register offsets). This is a very small
	 * hysteresis on the greedy selection algorithm.
*/
inflight = intel_context_inflight(&ve->context);
if (inflight && inflight != engine)
return false;
return true;
}
static struct virtual_engine *
first_virtual_engine(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *el = &engine->execlists;
struct rb_node *rb = rb_first_cached(&el->virtual);
while (rb) {
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
struct i915_request *rq = READ_ONCE(ve->request);
/* lazily cleanup after another engine handled rq */
if (!rq || !virtual_matches(ve, rq, engine)) {
rb_erase_cached(rb, &el->virtual);
RB_CLEAR_NODE(rb);
rb = rb_first_cached(&el->virtual);
continue;
}
return ve;
}
return NULL;
}
static void virtual_xfer_context(struct virtual_engine *ve,
struct intel_engine_cs *engine)
{
unsigned int n;
if (likely(engine == ve->siblings[0]))
return;
GEM_BUG_ON(READ_ONCE(ve->context.inflight));
if (!intel_engine_has_relative_mmio(engine))
lrc_update_offsets(&ve->context, engine);
/*
* Move the bound engine to the top of the list for
* future execution. We then kick this tasklet first
* before checking others, so that we preferentially
* reuse this set of bound registers.
*/
for (n = 1; n < ve->num_siblings; n++) {
if (ve->siblings[n] == engine) {
swap(ve->siblings[n], ve->siblings[0]);
break;
}
}
}
static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
LIST_HEAD(list);
/*
* We want to move the interrupted request to the back of
* the round-robin list (i.e. its priority level), but
	 * in doing so, we must also move all the requests that were in
	 * flight and waiting on the interrupted request, so that they
	 * are run after it again.
*/
do {
struct i915_dependency *p;
GEM_BUG_ON(i915_request_is_active(rq));
list_move_tail(&rq->sched.link, pl);
for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
if (p->flags & I915_DEPENDENCY_WEAK)
continue;
/* Leave semaphores spinning on the other engines */
if (w->engine != rq->engine)
continue;
/* No waiter should start before its signaler */
GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
__i915_request_has_started(w) &&
!__i915_request_is_complete(rq));
if (!i915_request_is_ready(w))
continue;
if (rq_prio(w) < rq_prio(rq))
continue;
GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
GEM_BUG_ON(i915_request_is_active(w));
list_move_tail(&w->sched.link, &list);
}
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
} while (rq);
}
static void defer_active(struct intel_engine_cs *engine)
{
struct i915_request *rq;
rq = __unwind_incomplete_requests(engine);
if (!rq)
return;
defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
rq_prio(rq)));
}
static bool
timeslice_yield(const struct intel_engine_execlists *el,
const struct i915_request *rq)
{
/*
* Once bitten, forever smitten!
*
* If the active context ever busy-waited on a semaphore,
* it will be treated as a hog until the end of its timeslice (i.e.
* until it is scheduled out and replaced by a new submission,
* possibly even its own lite-restore). The HW only sends an interrupt
	 * on the first miss, and we do not know if that semaphore has been
* signaled, or even if it is now stuck on another semaphore. Play
* safe, yield if it might be stuck -- it will be given a fresh
* timeslice in the near future.
*/
return rq->context->lrc.ccid == READ_ONCE(el->yield);
}
static bool needs_timeslice(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
if (!intel_engine_has_timeslices(engine))
return false;
/* If not currently active, or about to switch, wait for next event */
if (!rq || __i915_request_is_complete(rq))
return false;
/* We do not need to start the timeslice until after the ACK */
if (READ_ONCE(engine->execlists.pending[0]))
return false;
/* If ELSP[1] is occupied, always check to see if worth slicing */
if (!list_is_last_rcu(&rq->sched.link,
&engine->sched_engine->requests)) {
ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
return true;
}
/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
if (!i915_sched_engine_is_empty(engine->sched_engine)) {
ENGINE_TRACE(engine, "timeslice required for queue\n");
return true;
}
if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
ENGINE_TRACE(engine, "timeslice required for virtual\n");
return true;
}
return false;
}
static bool
timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
{
const struct intel_engine_execlists *el = &engine->execlists;
if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
return false;
if (!needs_timeslice(engine, rq))
return false;
return timer_expired(&el->timer) || timeslice_yield(el, rq);
}
static unsigned long timeslice(const struct intel_engine_cs *engine)
{
return READ_ONCE(engine->props.timeslice_duration_ms);
}
static void start_timeslice(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *el = &engine->execlists;
unsigned long duration;
/* Disable the timer if there is nothing to switch to */
duration = 0;
if (needs_timeslice(engine, *el->active)) {
/* Avoid continually prolonging an active timeslice */
if (timer_active(&el->timer)) {
/*
* If we just submitted a new ELSP after an old
* context, that context may have already consumed
* its timeslice, so recheck.
*/
if (!timer_pending(&el->timer))
tasklet_hi_schedule(&engine->sched_engine->tasklet);
return;
}
duration = timeslice(engine);
}
set_timer_ms(&el->timer, duration);
}
static void record_preemption(struct intel_engine_execlists *execlists)
{
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
const struct i915_request *rq)
{
if (!rq)
return 0;
/* Only allow ourselves to force reset the currently active context */
engine->execlists.preempt_target = rq;
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
return READ_ONCE(engine->props.preempt_timeout_ms);
}
static void set_preempt_timeout(struct intel_engine_cs *engine,
const struct i915_request *rq)
{
if (!intel_engine_has_preempt_reset(engine))
return;
set_timer_ms(&engine->execlists.preempt,
active_preempt_timeout(engine, rq));
}
static bool completed(const struct i915_request *rq)
{
if (i915_request_has_sentinel(rq))
return false;
return __i915_request_is_complete(rq);
}
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_sched_engine * const sched_engine = engine->sched_engine;
struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask;
struct i915_request *last, * const *active;
struct virtual_engine *ve;
struct rb_node *rb;
bool submit = false;
/*
* Hardware submission is through 2 ports. Conceptually each port
* has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
* static for a context, and unique to each, so we only execute
* requests belonging to a single context from each ring. RING_HEAD
* is maintained by the CS in the context image, it marks the place
* where it got up to last time, and through RING_TAIL we tell the CS
* where we want to execute up to this time.
*
* In this list the requests are in order of execution. Consecutive
* requests from the same context are adjacent in the ringbuffer. We
* can combine these requests into a single RING_TAIL update:
*
* RING_HEAD...req1...req2
* ^- RING_TAIL
* since to execute req2 the CS must first execute req1.
*
* Our goal then is to point each port to the end of a consecutive
* sequence of requests as being the most optimal (fewest wake ups
* and context switches) submission.
*/
spin_lock(&sched_engine->lock);
/*
* If the queue is higher priority than the last
* request in the currently active context, submit afresh.
* We will resubmit again afterwards in case we need to split
* the active context to interject the preemption request,
* i.e. we will retrigger preemption following the ack in case
* of trouble.
*
*/
active = execlists->active;
while ((last = *active) && completed(last))
active++;
if (last) {
if (need_preempt(engine, last)) {
ENGINE_TRACE(engine,
"preempting last=%llx:%lld, prio=%d, hint=%d\n",
last->fence.context,
last->fence.seqno,
last->sched.attr.priority,
sched_engine->queue_priority_hint);
record_preemption(execlists);
/*
* Don't let the RING_HEAD advance past the breadcrumb
* as we unwind (and until we resubmit) so that we do
* not accidentally tell it to go backwards.
*/
ring_set_paused(engine, 1);
/*
* Note that we have not stopped the GPU at this point,
* so we are unwinding the incomplete requests as they
* remain inflight and so by the time we do complete
* the preemption, some of the unwound requests may
* complete!
*/
__unwind_incomplete_requests(engine);
last = NULL;
} else if (timeslice_expired(engine, last)) {
ENGINE_TRACE(engine,
"expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
str_yes_no(timer_expired(&execlists->timer)),
last->fence.context, last->fence.seqno,
rq_prio(last),
sched_engine->queue_priority_hint,
str_yes_no(timeslice_yield(execlists, last)));
/*
* Consume this timeslice; ensure we start a new one.
*
* The timeslice expired, and we will unwind the
* running contexts and recompute the next ELSP.
* If that submit will be the same pair of contexts
* (due to dependency ordering), we will skip the
* submission. If we don't cancel the timer now,
* we will see that the timer has expired and
* reschedule the tasklet; continually until the
* next context switch or other preemption event.
*
* Since we have decided to reschedule based on
* consumption of this timeslice, if we submit the
* same context again, grant it a full timeslice.
*/
cancel_timer(&execlists->timer);
ring_set_paused(engine, 1);
defer_active(engine);
/*
* Unlike for preemption, if we rewind and continue
* executing the same context as previously active,
* the order of execution will remain the same and
* the tail will only advance. We do not need to
* force a full context restore, as a lite-restore
* is sufficient to resample the monotonic TAIL.
*
* If we switch to any other context, similarly we
* will not rewind TAIL of current context, and
* normal save/restore will preserve state and allow
* us to later continue executing the same request.
*/
last = NULL;
} else {
/*
* Otherwise if we already have a request pending
* for execution after the current one, we can
* just wait until the next CS event before
* queuing more. In either case we will force a
* lite-restore preemption event, but if we wait
* we hopefully coalesce several updates into a single
* submission.
*/
if (active[1]) {
/*
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
spin_unlock(&sched_engine->lock);
return;
}
}
}
/* XXX virtual is always taking precedence */
while ((ve = first_virtual_engine(engine))) {
struct i915_request *rq;
spin_lock(&ve->base.sched_engine->lock);
rq = ve->request;
if (unlikely(!virtual_matches(ve, rq, engine)))
goto unlock; /* lost the race to a sibling */
GEM_BUG_ON(rq->engine != &ve->base);
GEM_BUG_ON(rq->context != &ve->context);
if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) {
spin_unlock(&ve->base.sched_engine->lock);
break;
}
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.sched_engine->lock);
spin_unlock(&engine->sched_engine->lock);
return; /* leave this for another sibling */
}
ENGINE_TRACE(engine,
"virtual rq=%llx:%lld%s, new engine? %s\n",
rq->fence.context,
rq->fence.seqno,
__i915_request_is_complete(rq) ? "!" :
__i915_request_has_started(rq) ? "*" :
"",
str_yes_no(engine != ve->siblings[0]));
WRITE_ONCE(ve->request, NULL);
WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN);
rb = &ve->nodes[engine->id].rb;
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
WRITE_ONCE(rq->engine, engine);
if (__i915_request_submit(rq)) {
/*
* Only after we confirm that we will submit
* this request (i.e. it has not already
* completed), do we want to update the context.
*
* This serves two purposes. It avoids
* unnecessary work if we are resubmitting an
* already completed request after timeslicing.
* But more importantly, it prevents us altering
* ve->siblings[] on an idle context, where
* we may be using ve->siblings[] in
* virtual_context_enter / virtual_context_exit.
*/
virtual_xfer_context(ve, engine);
GEM_BUG_ON(ve->siblings[0] != engine);
submit = true;
last = rq;
}
i915_request_put(rq);
unlock:
spin_unlock(&ve->base.sched_engine->lock);
/*
* Hmm, we have a bunch of virtual engine requests,
* but the first one was already completed (thanks
* preempt-to-busy!). Keep looking at the veng queue
* until we have no more relevant requests (i.e.
* the normal submit queue has higher priority).
*/
if (submit)
break;
}
while ((rb = rb_first_cached(&sched_engine->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
priolist_for_each_request_consume(rq, rn, p) {
bool merge = true;
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
* have any exceptions (e.g. GVT saying never to
* combine contexts).
*
* If we can combine the requests, we can execute both
* by updating the RING_TAIL to point to the end of the
* second request, and so we never need to tell the
* hardware about the first.
*/
if (last && !can_merge_rq(last, rq)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
* are done.
*/
if (port == last_port)
goto done;
/*
* We must not populate both ELSP[] with the
* same LRCA, i.e. we must submit 2 different
* contexts if we submit 2 ELSP.
*/
if (last->context == rq->context)
goto done;
if (i915_request_has_sentinel(last))
goto done;
/*
* We avoid submitting virtual requests into
* the secondary ports so that we can migrate
* the request immediately to another engine
* rather than wait for the primary request.
*/
if (rq->execution_mask != engine->mask)
goto done;
/*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
* also have to be careful that we don't queue
* the same context (even though a different
* request) to the second port.
*/
if (ctx_single_port_submission(last->context) ||
ctx_single_port_submission(rq->context))
goto done;
merge = false;
}
if (__i915_request_submit(rq)) {
if (!merge) {
*port++ = i915_request_get(last);
last = NULL;
}
GEM_BUG_ON(last &&
!can_merge_ctx(last->context,
rq->context));
GEM_BUG_ON(last &&
i915_seqno_passed(last->fence.seqno,
rq->fence.seqno));
submit = true;
last = rq;
}
}
rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
done:
*port++ = i915_request_get(last);
/*
* Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
*
* We choose the priority hint such that if we add a request of greater
* priority than this, we kick the submission tasklet to decide on
* the right order of submitting the requests to hardware. We must
* also be prepared to reorder requests as they are in-flight on the
* HW. We derive the priority hint then as the first "hole" in
* the HW submission ports and if there are no available slots,
* the priority of the lowest executing request, i.e. last.
*
* When we do receive a higher priority request ready to run from the
* user, see queue_request(), the priority hint is bumped to that
* request triggering preemption on the next dequeue (or subsequent
* interrupt for secondary ports).
*/
sched_engine->queue_priority_hint = queue_prio(sched_engine);
i915_sched_engine_reset_on_empty(sched_engine);
spin_unlock(&sched_engine->lock);
/*
* We can skip poking the HW if we ended up with exactly the same set
* of requests as currently running, e.g. trying to timeslice a pair
* of ordered contexts.
*/
if (submit &&
memcmp(active,
execlists->pending,
(port - execlists->pending) * sizeof(*port))) {
*port = NULL;
while (port-- != execlists->pending)
execlists_schedule_in(*port, port - execlists->pending);
WRITE_ONCE(execlists->yield, -1);
set_preempt_timeout(engine, *active);
execlists_submit_ports(engine);
} else {
ring_set_paused(engine, 0);
while (port-- != execlists->pending)
i915_request_put(*port);
*execlists->pending = NULL;
}
}
static void execlists_dequeue_irq(struct intel_engine_cs *engine)
{
local_irq_disable(); /* Suspend interrupts across request submission */
execlists_dequeue(engine);
local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
}
static void clear_ports(struct i915_request **ports, int count)
{
memset_p((void **)ports, NULL, count);
}
static void
copy_ports(struct i915_request **dst, struct i915_request **src, int count)
{
/* A memcpy_p() would be very useful here! */
while (count--)
WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
}
static struct i915_request **
cancel_port_requests(struct intel_engine_execlists * const execlists,
struct i915_request **inactive)
{
struct i915_request * const *port;
for (port = execlists->pending; *port; port++)
*inactive++ = *port;
clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
/* Mark the end of active before we overwrite *active */
for (port = xchg(&execlists->active, execlists->pending); *port; port++)
*inactive++ = *port;
clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
smp_wmb(); /* complete the seqlock for execlists_active() */
WRITE_ONCE(execlists->active, execlists->inflight);
/* Having cancelled all outstanding process_csb(), stop their timers */
GEM_BUG_ON(execlists->pending[0]);
cancel_timer(&execlists->timer);
cancel_timer(&execlists->preempt);
return inactive;
}
/*
* Starting with Gen12, the status has a new format:
*
* bit 0: switched to new queue
* bit 1: reserved
* bit 2: semaphore wait mode (poll or signal), only valid when
* switch detail is set to "wait on semaphore"
* bits 3-5: engine class
* bits 6-11: engine instance
* bits 12-14: reserved
* bits 15-25: sw context id of the lrc the GT switched to
* bits 26-31: sw counter of the lrc the GT switched to
* bits 32-35: context switch detail
* - 0: ctx complete
* - 1: wait on sync flip
* - 2: wait on vblank
* - 3: wait on scanline
* - 4: wait on semaphore
* - 5: context preempted (not on SEMAPHORE_WAIT or
* WAIT_FOR_EVENT)
* bit 36: reserved
* bits 37-43: wait detail (for switch detail 1 to 4)
* bits 44-46: reserved
* bits 47-57: sw context id of the lrc the GT switched away from
* bits 58-63: sw counter of the lrc the GT switched away from
*
* Xe_HP csb shuffles things around compared to TGL:
*
* bits 0-3: context switch detail (same possible values as TGL)
* bits 4-9: engine instance
* bits 10-25: sw context id of the lrc the GT switched to
* bits 26-31: sw counter of the lrc the GT switched to
 * bit 32: semaphore wait mode (poll or signal), only valid when
* switch detail is set to "wait on semaphore"
* bit 33: switched to new queue
* bits 34-41: wait detail (for switch detail 1 to 4)
* bits 42-57: sw context id of the lrc the GT switched away from
* bits 58-63: sw counter of the lrc the GT switched away from
*/
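/*
 * For illustration, using the Gen12 layout above: an event whose "switched
 * away from" sw context id (bits 47-57) reads as the idle id while the
 * "switched to" id names a real context describes a submission onto an idle
 * engine, which __gen12_csb_parse() reports as a promotion of the pending
 * ELSP; an event with a valid away-context and no new-queue indication is
 * instead treated as a completion within the currently active ELSP (see
 * process_csb()).
 */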
static inline bool
__gen12_csb_parse(bool ctx_to_valid, bool ctx_away_valid, bool new_queue,
u8 switch_detail)
{
/*
* The context switch detail is not guaranteed to be 5 when a preemption
* occurs, so we can't just check for that. The check below works for
* all the cases we care about, including preemptions of WAIT
* instructions and lite-restore. Preempt-to-idle via the CTRL register
* would require some extra handling, but we don't support that.
*/
if (!ctx_away_valid || new_queue) {
GEM_BUG_ON(!ctx_to_valid);
return true;
}
/*
* switch detail = 5 is covered by the case above and we do not expect a
* context switch on an unsuccessful wait instruction since we always
* use polling mode.
*/
GEM_BUG_ON(switch_detail);
return false;
}
static bool xehp_csb_parse(const u64 csb)
{
return __gen12_csb_parse(XEHP_CSB_CTX_VALID(lower_32_bits(csb)), /* cxt to */
XEHP_CSB_CTX_VALID(upper_32_bits(csb)), /* cxt away */
upper_32_bits(csb) & XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE,
GEN12_CTX_SWITCH_DETAIL(lower_32_bits(csb)));
}
static bool gen12_csb_parse(const u64 csb)
{
return __gen12_csb_parse(GEN12_CSB_CTX_VALID(lower_32_bits(csb)), /* cxt to */
GEN12_CSB_CTX_VALID(upper_32_bits(csb)), /* cxt away */
lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE,
GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
}
static bool gen8_csb_parse(const u64 csb)
{
return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
}
static noinline u64
wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb)
{
u64 entry;
/*
* Reading from the HWSP has one particular advantage: we can detect
* a stale entry. Since the write into HWSP is broken, we have no reason
	 * to trust the HW at all; the mmio entry may equally be unordered, so
	 * we prefer the path that is self-checking and, as a last resort,
* return the mmio value.
*
* tgl,dg1:HSDES#22011327657
*/
preempt_disable();
if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) {
int idx = csb - engine->execlists.csb_status;
int status;
status = GEN8_EXECLISTS_STATUS_BUF;
if (idx >= 6) {
status = GEN11_EXECLISTS_STATUS_BUF2;
idx -= 6;
}
status += sizeof(u64) * idx;
entry = intel_uncore_read64(engine->uncore,
_MMIO(engine->mmio_base + status));
}
preempt_enable();
return entry;
}
static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb)
{
u64 entry = READ_ONCE(*csb);
/*
* Unfortunately, the GPU does not always serialise its write
* of the CSB entries before its write of the CSB pointer, at least
* from the perspective of the CPU, using what is known as a Global
* Observation Point. We may read a new CSB tail pointer, but then
* read the stale CSB entries, causing us to misinterpret the
* context-switch events, and eventually declare the GPU hung.
*
* icl:HSDES#1806554093
* tgl:HSDES#22011248461
*/
if (unlikely(entry == -1))
entry = wa_csb_read(engine, csb);
/* Consume this entry so that we can spot its future reuse. */
WRITE_ONCE(*csb, -1);
/* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */
return entry;
}
static void new_timeslice(struct intel_engine_execlists *el)
{
/* By cancelling, we will start afresh in start_timeslice() */
cancel_timer(&el->timer);
}
static struct i915_request **
process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
u64 * const buf = execlists->csb_status;
const u8 num_entries = execlists->csb_size;
struct i915_request **prev;
u8 head, tail;
/*
* As we modify our execlists state tracking we require exclusive
* access. Either we are inside the tasklet, or the tasklet is disabled
* and we assume that is only inside the reset paths and so serialised.
*/
GEM_BUG_ON(!tasklet_is_locked(&engine->sched_engine->tasklet) &&
!reset_in_progress(engine));
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
* When reading from the csb_write mmio register, we have to be
* careful to only use the GEN8_CSB_WRITE_PTR portion, which is
* the low 4bits. As it happens we know the next 4bits are always
	 * zero and so we can simply mask off the low u8 of the register
* and treat it identically to reading from the HWSP (without having
* to use explicit shifting and masking, and probably bifurcating
* the code to handle the legacy mmio read).
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
if (unlikely(head == tail))
return inactive;
/*
* We will consume all events from HW, or at least pretend to.
*
* The sequence of events from the HW is deterministic, and derived
* from our writes to the ELSP, with a smidgen of variability for
	 * the arrival of the asynchronous requests wrt the inflight
* execution. If the HW sends an event that does not correspond with
* the one we are expecting, we have to abandon all hope as we lose
* all tracking of what the engine is actually executing. We will
* only detect we are out of sequence with the HW when we get an
* 'impossible' event because we have already drained our own
* preemption/promotion queue. If this occurs, we know that we likely
	 * lost track of execution earlier and must unwind and restart; the
	 * simplest way is to stop processing the event queue and force the
	 * engine to reset.
*/
execlists->csb_head = tail;
ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
/*
* Hopefully paired with a wmb() in HW!
*
* We must complete the read of the write pointer before any reads
* from the CSB, so that we do not see stale values. Without an rmb
* (lfence) the HW may speculatively perform the CSB[] reads *before*
* we perform the READ_ONCE(*csb_write).
*/
rmb();
/* Remember who was last running under the timer */
prev = inactive;
*prev = NULL;
do {
bool promote;
u64 csb;
if (++head == num_entries)
head = 0;
/*
* We are flying near dragons again.
*
* We hold a reference to the request in execlist_port[]
* but no more than that. We are operating in softirq
* context and so cannot hold any mutex or sleep. That
* prevents us stopping the requests we are processing
* in port[] from being retired simultaneously (the
* breadcrumb will be complete before we see the
* context-switch). As we only hold the reference to the
* request, any pointer chasing underneath the request
* is subject to a potential use-after-free. Thus we
* store all of the bookkeeping within port[] as
* required, and avoid using unguarded pointers beneath
* request itself. The same applies to the atomic
* status notifier.
*/
csb = csb_read(engine, buf + head);
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
head, upper_32_bits(csb), lower_32_bits(csb));
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
promote = xehp_csb_parse(csb);
else if (GRAPHICS_VER(engine->i915) >= 12)
promote = gen12_csb_parse(csb);
else
promote = gen8_csb_parse(csb);
if (promote) {
struct i915_request * const *old = execlists->active;
if (GEM_WARN_ON(!*execlists->pending)) {
execlists->error_interrupt |= ERROR_CSB;
break;
}
ring_set_paused(engine, 0);
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", old);
while (*old)
*inactive++ = *old++;
/* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
copy_ports(execlists->inflight,
execlists->pending,
execlists_num_ports(execlists));
smp_wmb(); /* complete the seqlock */
WRITE_ONCE(execlists->active, execlists->inflight);
/* XXX Magic delay for tgl */
ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
WRITE_ONCE(execlists->pending[0], NULL);
} else {
if (GEM_WARN_ON(!*execlists->active)) {
execlists->error_interrupt |= ERROR_CSB;
break;
}
/* port0 completed, advanced to port1 */
trace_ports(execlists, "completed", execlists->active);
/*
* We rely on the hardware being strongly
* ordered, that the breadcrumb write is
* coherent (visible from the CPU) before the
* user interrupt is processed. One might assume
* that the breadcrumb write being before the
* user interrupt and the CS event for the context
* switch would therefore be before the CS event
* itself...
*/
if (GEM_SHOW_DEBUG() &&
!__i915_request_is_complete(*execlists->active)) {
struct i915_request *rq = *execlists->active;
const u32 *regs __maybe_unused =
rq->context->lrc_reg_state;
ENGINE_TRACE(engine,
"context completed before request!\n");
ENGINE_TRACE(engine,
"ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
ENGINE_READ(engine, RING_START),
ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
ENGINE_READ(engine, RING_CTL),
ENGINE_READ(engine, RING_MI_MODE));
ENGINE_TRACE(engine,
"rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
i915_ggtt_offset(rq->ring->vma),
rq->head, rq->tail,
rq->fence.context,
lower_32_bits(rq->fence.seqno),
hwsp_seqno(rq));
ENGINE_TRACE(engine,
"ctx:{start:%08x, head:%04x, tail:%04x}, ",
regs[CTX_RING_START],
regs[CTX_RING_HEAD],
regs[CTX_RING_TAIL]);
}
*inactive++ = *execlists->active++;
GEM_BUG_ON(execlists->active - execlists->inflight >
execlists_num_ports(execlists));
}
} while (head != tail);
/*
* Gen11 has proven to fail wrt global observation point between
* entry and tail update, failing on the ordering and thus
* we see an old entry in the context status buffer.
*
	 * Forcibly evict the entries ahead of the next GPU CSB update,
	 * to increase the odds that we get fresh entries even with
	 * non-working hardware. The cost of doing so mostly comes out in
	 * the wash, as the hardware, working or not, will need to do the
	 * invalidation beforehand.
*/
drm_clflush_virt_range(&buf[0], num_entries * sizeof(buf[0]));
/*
* We assume that any event reflects a change in context flow
* and merits a fresh timeslice. We reinstall the timer after
	 * inspecting the queue to see if we need to resubmit.
*/
if (*prev != *execlists->active) { /* elide lite-restores */
struct intel_context *prev_ce = NULL, *active_ce = NULL;
/*
* Note the inherent discrepancy between the HW runtime,
* recorded as part of the context switch, and the CPU
* adjustment for active contexts. We have to hope that
* the delay in processing the CS event is very small
* and consistent. It works to our advantage to have
* the CPU adjustment _undershoot_ (i.e. start later than)
* the CS timestamp so we never overreport the runtime
		 * and correct ourselves later when updating from HW.
*/
if (*prev)
prev_ce = (*prev)->context;
if (*execlists->active)
active_ce = (*execlists->active)->context;
if (prev_ce != active_ce) {
if (prev_ce)
lrc_runtime_stop(prev_ce);
if (active_ce)
lrc_runtime_start(active_ce);
}
new_timeslice(execlists);
}
return inactive;
}
static void post_process_csb(struct i915_request **port,
struct i915_request **last)
{
while (port != last)
execlists_schedule_out(*port++);
}
static void __execlists_hold(struct i915_request *rq)
{
LIST_HEAD(list);
do {
struct i915_dependency *p;
if (i915_request_is_active(rq))
__i915_request_unsubmit(rq);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
list_move_tail(&rq->sched.link,
&rq->engine->sched_engine->hold);
i915_request_set_hold(rq);
RQ_TRACE(rq, "on hold\n");
for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
if (p->flags & I915_DEPENDENCY_WEAK)
continue;
/* Leave semaphores spinning on the other engines */
if (w->engine != rq->engine)
continue;
if (!i915_request_is_ready(w))
continue;
if (__i915_request_is_complete(w))
continue;
if (i915_request_on_hold(w))
continue;
list_move_tail(&w->sched.link, &list);
}
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
} while (rq);
}
static bool execlists_hold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
if (i915_request_on_hold(rq))
return false;
spin_lock_irq(&engine->sched_engine->lock);
if (__i915_request_is_complete(rq)) { /* too late! */
rq = NULL;
goto unlock;
}
/*
* Transfer this request onto the hold queue to prevent it
* being resubmitted to HW (and potentially completed) before we have
* released it. Since we may have already submitted following
* requests, we need to remove those as well.
*/
GEM_BUG_ON(i915_request_on_hold(rq));
GEM_BUG_ON(rq->engine != engine);
__execlists_hold(rq);
GEM_BUG_ON(list_empty(&engine->sched_engine->hold));
unlock:
spin_unlock_irq(&engine->sched_engine->lock);
return rq;
}
static bool hold_request(const struct i915_request *rq)
{
struct i915_dependency *p;
bool result = false;
/*
* If one of our ancestors is on hold, we must also be on hold,
* otherwise we will bypass it and execute before it.
*/
rcu_read_lock();
for_each_signaler(p, rq) {
const struct i915_request *s =
container_of(p->signaler, typeof(*s), sched);
if (s->engine != rq->engine)
continue;
result = i915_request_on_hold(s);
if (result)
break;
}
rcu_read_unlock();
return result;
}
static void __execlists_unhold(struct i915_request *rq)
{
LIST_HEAD(list);
do {
struct i915_dependency *p;
RQ_TRACE(rq, "hold release\n");
GEM_BUG_ON(!i915_request_on_hold(rq));
GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
i915_request_clear_hold(rq);
list_move_tail(&rq->sched.link,
i915_sched_lookup_priolist(rq->engine->sched_engine,
rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
/* Also release any children on this engine that are ready */
for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
if (p->flags & I915_DEPENDENCY_WEAK)
continue;
if (w->engine != rq->engine)
continue;
if (!i915_request_on_hold(w))
continue;
/* Check that no other parents are also on hold */
if (hold_request(w))
continue;
list_move_tail(&w->sched.link, &list);
}
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
} while (rq);
}
static void execlists_unhold(struct intel_engine_cs *engine,
struct i915_request *rq)
{
spin_lock_irq(&engine->sched_engine->lock);
/*
* Move this request back to the priority queue, and all of its
* children and grandchildren that were suspended along with it.
*/
__execlists_unhold(rq);
if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
engine->sched_engine->queue_priority_hint = rq_prio(rq);
tasklet_hi_schedule(&engine->sched_engine->tasklet);
}
spin_unlock_irq(&engine->sched_engine->lock);
}
struct execlists_capture {
struct work_struct work;
struct i915_request *rq;
struct i915_gpu_coredump *error;
};
static void execlists_capture_work(struct work_struct *work)
{
struct execlists_capture *cap = container_of(work, typeof(*cap), work);
const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL |
__GFP_NOWARN;
struct intel_engine_cs *engine = cap->rq->engine;
struct intel_gt_coredump *gt = cap->error->gt;
struct intel_engine_capture_vma *vma;
/* Compress all the objects attached to the request, slow! */
vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
if (vma) {
struct i915_vma_compress *compress =
i915_vma_capture_prepare(gt);
intel_engine_coredump_add_vma(gt->engine, vma, compress);
i915_vma_capture_finish(gt, compress);
}
gt->simulated = gt->engine->simulated;
cap->error->simulated = gt->simulated;
/* Publish the error state, and announce it to the world */
i915_error_state_store(cap->error);
i915_gpu_coredump_put(cap->error);
/* Return this request and all that depend upon it for signaling */
execlists_unhold(engine, cap->rq);
i915_request_put(cap->rq);
kfree(cap);
}
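/*
* Called from the atomic (softirq) reset path, so we are restricted
* to GFP_ATOMIC allocations and must keep this quick.
*/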
static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
{
const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
struct execlists_capture *cap;
cap = kmalloc(sizeof(*cap), gfp);
if (!cap)
return NULL;
cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
if (!cap->error)
goto err_cap;
cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp, CORE_DUMP_FLAG_NONE);
if (!cap->error->gt)
goto err_gpu;
cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp, CORE_DUMP_FLAG_NONE);
if (!cap->error->gt->engine)
goto err_gt;
cap->error->gt->engine->hung = true;
return cap;
err_gt:
kfree(cap->error->gt);
err_gpu:
kfree(cap->error);
err_cap:
kfree(cap);
return NULL;
}
static struct i915_request *
active_context(struct intel_engine_cs *engine, u32 ccid)
{
const struct intel_engine_execlists * const el = &engine->execlists;
struct i915_request * const *port, *rq;
/*
* Use the most recent result from process_csb(), but just in case
* we trigger an error (via interrupt) before the first CS event has
* been written, peek at the next submission.
*/
for (port = el->active; (rq = *port); port++) {
if (rq->context->lrc.ccid == ccid) {
ENGINE_TRACE(engine,
"ccid:%x found at active:%zd\n",
ccid, port - el->active);
return rq;
}
}
for (port = el->pending; (rq = *port); port++) {
if (rq->context->lrc.ccid == ccid) {
ENGINE_TRACE(engine,
"ccid:%x found at pending:%zd\n",
ccid, port - el->pending);
return rq;
}
}
ENGINE_TRACE(engine, "ccid:%x not found\n", ccid);
return NULL;
}
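/* The upper dword of the execlist status register reports the CCID of the currently executing context. */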
static u32 active_ccid(struct intel_engine_cs *engine)
{
return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI);
}
static void execlists_capture(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct execlists_capture *cap;
if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
return;
/*
* We need to _quickly_ capture the engine state before we reset.
* We are inside an atomic section (softirq) here and we are delaying
* the forced preemption event.
*/
cap = capture_regs(engine);
if (!cap)
return;
spin_lock_irq(&engine->sched_engine->lock);
cap->rq = active_context(engine, active_ccid(engine));
if (cap->rq) {
cap->rq = active_request(cap->rq->context->timeline, cap->rq);
cap->rq = i915_request_get_rcu(cap->rq);
}
spin_unlock_irq(&engine->sched_engine->lock);
if (!cap->rq)
goto err_free;
/*
* Remove the request from the execlists queue, and take ownership
* of the request. We pass it to our worker who will _slowly_ compress
* all the pages the _user_ requested for debugging their batch, after
* which we return it to the queue for signaling.
*
* By removing them from the execlists queue, we also remove the
* requests from being processed by __unwind_incomplete_requests()
* during the intel_engine_reset(), and so they will *not* be replayed
* afterwards.
*
* Note that because we have not yet reset the engine at this point,
* it is possible that the request we have identified as
* guilty did in fact complete, and we will then hit an arbitration
* point allowing the outstanding preemption to succeed. The likelihood
* of that is very low (as capturing of the engine registers should be
* fast enough to run inside an irq-off atomic section!), so we will
* simply hold that request accountable for being non-preemptible
* long enough to force the reset.
*/
if (!execlists_hold(engine, cap->rq))
goto err_rq;
INIT_WORK(&cap->work, execlists_capture_work);
queue_work(i915->unordered_wq, &cap->work);
return;
err_rq:
i915_request_put(cap->rq);
err_free:
i915_gpu_coredump_put(cap->error);
kfree(cap);
}
static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
if (!intel_has_reset_engine(engine->gt))
return;
if (test_and_set_bit(bit, lock))
return;
ENGINE_TRACE(engine, "reset for %s\n", msg);
/* Mark this tasklet as disabled to avoid waiting for it to complete */
tasklet_disable_nosync(&engine->sched_engine->tasklet);
ring_set_paused(engine, 1); /* Freeze the current request in place */
execlists_capture(engine);
intel_engine_reset(engine, msg);
tasklet_enable(&engine->sched_engine->tasklet);
clear_and_wake_up_bit(bit, lock);
}
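/*
* A preempt timeout only counts if the preemption timer has expired
* while we still have a pending ELSP submission waiting to take over.
*/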
static bool preempt_timeout(const struct intel_engine_cs *const engine)
{
const struct timer_list *t = &engine->execlists.preempt;
if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return false;
if (!timer_expired(t))
return false;
return engine->execlists.pending[0];
}
/*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
static void execlists_submission_tasklet(struct tasklet_struct *t)
{
struct i915_sched_engine *sched_engine =
from_tasklet(sched_engine, t, tasklet);
struct intel_engine_cs * const engine = sched_engine->private_data;
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
rcu_read_lock();
inactive = process_csb(engine, post);
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
if (unlikely(preempt_timeout(engine))) {
const struct i915_request *rq = *engine->execlists.active;
/*
* If after the preempt-timeout expired, we are still on the
* same active request/context as before we initiated the
* preemption, reset the engine.
*
* However, if we have processed a CS event to switch contexts,
* but not yet processed the CS event for the pending
* preemption, reset the timer allowing the new context to
* gracefully exit.
*/
cancel_timer(&engine->execlists.preempt);
if (rq == engine->execlists.preempt_target)
engine->execlists.error_interrupt |= ERROR_PREEMPT;
else
set_timer_ms(&engine->execlists.preempt,
active_preempt_timeout(engine, rq));
}
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
const char *msg;
/* Generate the error message in priority wrt the user! */
if (engine->execlists.error_interrupt & GENMASK(15, 0))
msg = "CS error"; /* thrown by a user payload */
else if (engine->execlists.error_interrupt & ERROR_CSB)
msg = "invalid CSB event";
else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
msg = "preemption time out";
else
msg = "internal error";
engine->execlists.error_interrupt = 0;
execlists_reset(engine, msg);
}
if (!engine->execlists.pending[0]) {
execlists_dequeue_irq(engine);
start_timeslice(engine);
}
post_process_csb(post, inactive);
rcu_read_unlock();
}
static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
bool tasklet = false;
if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
u32 eir;
/* Upper 16b are the enabling mask, rsvd for internal errors */
eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
ENGINE_TRACE(engine, "CS error: %x\n", eir);
/* Disable the error interrupt until after the reset */
if (likely(eir)) {
ENGINE_WRITE(engine, RING_EMR, ~0u);
ENGINE_WRITE(engine, RING_EIR, eir);
WRITE_ONCE(engine->execlists.error_interrupt, eir);
tasklet = true;
}
}
if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
WRITE_ONCE(engine->execlists.yield,
ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
ENGINE_TRACE(engine, "semaphore yield: %08x\n",
engine->execlists.yield);
if (del_timer(&engine->execlists.timer))
tasklet = true;
}
if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(engine);
if (tasklet)
tasklet_hi_schedule(&engine->sched_engine->tasklet);
}
static void __execlists_kick(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
/* Kick the tasklet for some interrupt coalescing and reset handling */
tasklet_hi_schedule(&engine->sched_engine->tasklet);
}
#define execlists_kick(t, member) \
__execlists_kick(container_of(t, struct intel_engine_execlists, member))
static void execlists_timeslice(struct timer_list *timer)
{
execlists_kick(timer, timer);
}
static void execlists_preempt(struct timer_list *timer)
{
execlists_kick(timer, preempt);
}
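/* Add the request to the priolist matching its effective priority. */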
static void queue_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
GEM_BUG_ON(!list_empty(&rq->sched.link));
list_add_tail(&rq->sched.link,
i915_sched_lookup_priolist(engine->sched_engine,
rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
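/*
* Only worth kicking the tasklet if this request raises the queue
* priority hint, i.e. it may warrant preempting the current ELSP.
*/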
static bool submit_queue(struct intel_engine_cs *engine,
const struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = engine->sched_engine;
if (rq_prio(rq) <= sched_engine->queue_priority_hint)
return false;
sched_engine->queue_priority_hint = rq_prio(rq);
return true;
}
static bool ancestor_on_hold(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
GEM_BUG_ON(i915_request_on_hold(rq));
return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
}
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->sched_engine->lock, flags);
if (unlikely(ancestor_on_hold(engine, request))) {
RQ_TRACE(request, "ancestor on hold\n");
list_add_tail(&request->sched.link,
&engine->sched_engine->hold);
i915_request_set_hold(request);
} else {
queue_request(engine, request);
GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
GEM_BUG_ON(list_empty(&request->sched.link));
if (submit_queue(engine, request))
__execlists_kick(&engine->execlists);
}
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
static int
__execlists_context_pre_pin(struct intel_context *ce,
struct intel_engine_cs *engine,
struct i915_gem_ww_ctx *ww, void **vaddr)
{
int err;
err = lrc_pre_pin(ce, engine, ww, vaddr);
if (err)
return err;
if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) {
lrc_init_state(ce, engine, *vaddr);
__i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
}
return 0;
}
static int execlists_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr);
}
static int execlists_context_pin(struct intel_context *ce, void *vaddr)
{
return lrc_pin(ce, ce->engine, vaddr);
}
static int execlists_context_alloc(struct intel_context *ce)
{
return lrc_alloc(ce, ce->engine);
}
static void execlists_context_cancel_request(struct intel_context *ce,
struct i915_request *rq)
{
struct intel_engine_cs *engine = NULL;
i915_request_active_engine(rq, &engine);
if (engine && intel_engine_pulse(engine))
intel_gt_handle_error(engine->gt, engine->mask, 0,
"request cancellation by %s",
current->comm);
}
static struct intel_context *
execlists_create_parallel(struct intel_engine_cs **engines,
unsigned int num_siblings,
unsigned int width)
{
struct intel_context *parent = NULL, *ce, *err;
int i;
GEM_BUG_ON(num_siblings != 1);
for (i = 0; i < width; ++i) {
ce = intel_context_create(engines[i]);
if (IS_ERR(ce)) {
err = ce;
goto unwind;
}
if (i == 0)
parent = ce;
else
intel_context_bind_parent_child(parent, ce);
}
parent->parallel.fence_context = dma_fence_context_alloc(1);
intel_context_set_nopreempt(parent);
for_each_child(parent, ce)
intel_context_set_nopreempt(ce);
return parent;
unwind:
if (parent)
intel_context_put(parent);
return err;
}
static const struct intel_context_ops execlists_context_ops = {
.flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
.alloc = execlists_context_alloc,
.cancel_request = execlists_context_cancel_request,
.pre_pin = execlists_context_pre_pin,
.pin = execlists_context_pin,
.unpin = lrc_unpin,
.post_unpin = lrc_post_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
.reset = lrc_reset,
.destroy = lrc_destroy,
.create_parallel = execlists_create_parallel,
.create_virtual = execlists_create_virtual,
};
static int emit_pdps(struct i915_request *rq)
{
const struct intel_engine_cs * const engine = rq->engine;
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
int err, i;
u32 *cs;
GEM_BUG_ON(intel_vgpu_active(rq->i915));
/*
* Beware ye of the dragons, this sequence is magic!
*
* Small changes to this sequence can cause anything from
* GPU hangs to forcewake errors and machine lockups!
*/
cs = intel_ring_begin(rq, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
/* Flush any residual operations from the context load */
err = engine->emit_flush(rq, EMIT_FLUSH);
if (err)
return err;
/* Magic required to prevent forcewake errors! */
err = engine->emit_flush(rq, EMIT_INVALIDATE);
if (err)
return err;
cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Ensure the LRI have landed before we invalidate & continue */
*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
u32 base = engine->mmio_base;
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
*cs++ = upper_32_bits(pd_daddr);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
*cs++ = lower_32_bits(pd_daddr);
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
intel_ring_advance(rq, cs);
return 0;
}
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
GEM_BUG_ON(!intel_context_is_pinned(request->context));
/*
* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
/*
* Note that after this point, we have committed to using
* this request as it is being used to both track the
* state of engine initialisation and liveness of the
* golden renderstate above. Think twice before you try
* to cancel/unwind this request now.
*/
if (!i915_vm_is_4lvl(request->context->vm)) {
ret = emit_pdps(request);
if (ret)
return ret;
}
/* Unconditionally invalidate GPU caches and TLBs. */
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
request->reserved_space -= EXECLISTS_REQUEST_SIZE;
return 0;
}
static void reset_csb_pointers(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
const unsigned int reset_value = execlists->csb_size - 1;
ring_set_paused(engine, 0);
/*
* Sometimes Icelake forgets to reset its pointers on a GPU reset.
* Bludgeon them with a mmio update to be sure.
*/
ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
0xffff << 16 | reset_value << 8 | reset_value);
ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
* the *first* entry we check is entry 0. To complicate this further,
* as we don't wait for the first interrupt after reset, we have to
* fake the HW write to point back to the last entry so that our
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
execlists->csb_head = reset_value;
WRITE_ONCE(*execlists->csb_write, reset_value);
wmb(); /* Make sure this is visible to HW (paranoia?) */
/* Check that the GPU does indeed update the CSB entries! */
memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
drm_clflush_virt_range(execlists->csb_status,
execlists->csb_size *
sizeof(execlists->csb_status));
/* Once more for luck and our trusty paranoia */
ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
0xffff << 16 | reset_value << 8 | reset_value);
ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
}
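/*
* Rewrite the seqno for every timeline stored in the engine's status
* page, as its contents may have been lost across suspend/resume.
*/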
static void sanitize_hwsp(struct intel_engine_cs *engine)
{
struct intel_timeline *tl;
list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
intel_timeline_reset_seqno(tl);
}
static void execlists_sanitize(struct intel_engine_cs *engine)
{
GEM_BUG_ON(execlists_active(&engine->execlists));
/*
* Poison residual state on resume, in case the suspend didn't!
*
* We have to assume that across suspend/resume (or other loss
* of control) the contents of our pinned buffers have been
* lost, replaced by garbage. Since this doesn't always happen,
* let's poison such state so that we more quickly spot when
* we falsely assume it has been preserved.
*/
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
reset_csb_pointers(engine);
/*
* The kernel_context HWSP is stored in the status_page. As above,
* that may be lost on resume/initialisation, and so we need to
* reset the value in the HWSP.
*/
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
intel_engine_reset_pinned_contexts(engine);
}
static void enable_error_interrupt(struct intel_engine_cs *engine)
{
u32 status;
engine->execlists.error_interrupt = 0;
ENGINE_WRITE(engine, RING_EMR, ~0u);
ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
status = ENGINE_READ(engine, RING_ESR);
if (unlikely(status)) {
drm_err(&engine->i915->drm,
"engine '%s' resumed still in error: %08x\n",
engine->name, status);
__intel_gt_reset(engine->gt, engine->mask);
}
/*
* On current gen8+, we have 2 signals to play with
*
* - I915_ERROR_INSTRUCTION (bit 0)
*
* Generate an error if the command parser encounters an invalid
* instruction
*
* This is a fatal error.
*
* - CP_PRIV (bit 2)
*
* Generate an error on privilege violation (where the CP replaces
* the instruction with a no-op). This also fires for writes into
* read-only scratch pages.
*
* This is a non-fatal error, parsing continues.
*
* * there are a few others defined for odd HW that we do not use
*
* Since CP_PRIV fires for cases where we have chosen to ignore the
* error (as the HW is validating and suppressing the mistakes), we
* only unmask the instruction error bit.
*/
ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
}
static void enable_execlists(struct intel_engine_cs *engine)
{
u32 mode;
assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
if (GRAPHICS_VER(engine->i915) >= 11)
mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
ENGINE_WRITE_FW(engine,
RING_HWS_PGA,
i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
enable_error_interrupt(engine);
}
static int execlists_resume(struct intel_engine_cs *engine)
{
intel_mocs_init_engine(engine);
intel_breadcrumbs_reset(engine->breadcrumbs);
enable_execlists(engine);
if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
xehp_enable_ccs_engines(engine);
return 0;
}
static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
ENGINE_TRACE(engine, "depth<-%d\n",
atomic_read(&engine->sched_engine->tasklet.count));
/*
* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
* is completed by one engine, it may then queue a request
* to a second via its execlists->tasklet *just* as we are
* calling engine->resume() and also writing the ELSP.
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
__tasklet_disable_sync_once(&engine->sched_engine->tasklet);
GEM_BUG_ON(!reset_in_progress(engine));
/*
* We stop the engines, otherwise we might get a failed reset and a
* dead gpu (on elk). Also, a gpu as modern as kbl can suffer a
* system hang if a batchbuffer is in progress when the reset is
* issued, regardless of the READY_TO_RESET ack. Thus we assume it
* is best to stop the engines on all gens where we have a gpu
* reset.
*
* WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
*
* FIXME: Wa for more modern gens needs to be validated
*/
ring_set_paused(engine, 1);
intel_engine_stop_cs(engine);
/*
* Wa_22011802037: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
(GRAPHICS_VER(engine->i915) >= 11 &&
GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
intel_engine_wait_for_pending_mi_fw(engine);
engine->execlists.reset_ccid = active_ccid(engine);
}
static struct i915_request **
reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
drm_clflush_virt_range(execlists->csb_write,
sizeof(execlists->csb_write[0]));
inactive = process_csb(engine, inactive); /* drain preemption events */
/* Following the reset, we need to reload the CSB read/write pointers */
reset_csb_pointers(engine);
return inactive;
}
static void
execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
{
struct intel_context *ce;
struct i915_request *rq;
u32 head;
/*
* Save the currently executing context, even if we completed
* its request, it was still running at the time of the
* reset and will have been clobbered.
*/
rq = active_context(engine, engine->execlists.reset_ccid);
if (!rq)
return;
ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
if (__i915_request_is_complete(rq)) {
/* Idle context; tidy up the ring so we can restart afresh */
head = intel_ring_wrap(ce->ring, rq->tail);
goto out_replay;
}
/* We still have requests in-flight; the engine should be active */
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
rq = active_request(ce->timeline, rq);
head = intel_ring_wrap(ce->ring, rq->head);
GEM_BUG_ON(head == ce->ring->tail);
/*
* If this request hasn't started yet, e.g. it is waiting on a
* semaphore, we need to avoid skipping the request or else we
* break the signaling chain. However, if the context is corrupt
* the request will not restart and we will be stuck with a wedged
* device. It is quite often the case that if we issue a reset
* while the GPU is loading the context image, the context
* image becomes corrupt.
*
* Otherwise, if we have not started yet, the request should replay
* perfectly and we do not need to flag the result as being erroneous.
*/
if (!__i915_request_has_started(rq))
goto out_replay;
/*
* If the request was innocent, we leave the request in the ELSP
* and will try to replay it on restarting. The context image may
* have been corrupted by the reset, in which case we may have
* to service a new GPU hang, but more likely we can continue on
* without impact.
*
* If the request was guilty, we presume the context is corrupt
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
__i915_request_reset(rq, stalled);
/*
* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
* so clear it and rebuild just what we need for the breadcrumb.
* All pending requests for this context will be zapped, and any
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
out_replay:
ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
head, ce->ring->tail);
lrc_reset_regs(ce, engine);
ce->lrc.lrca = lrc_update_regs(ce, engine, head);
}
static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
rcu_read_lock();
inactive = reset_csb(engine, post);
execlists_reset_active(engine, true);
inactive = cancel_port_requests(execlists, inactive);
post_process_csb(post, inactive);
rcu_read_unlock();
}
static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
unsigned long flags;
ENGINE_TRACE(engine, "\n");
/* Process the csb, find the guilty context and throw away */
execlists_reset_csb(engine, stalled);
/* Push back any incomplete requests for replay after the reset. */
rcu_read_lock();
spin_lock_irqsave(&engine->sched_engine->lock, flags);
__unwind_incomplete_requests(engine);
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
rcu_read_unlock();
}
static void nop_submission_tasklet(struct tasklet_struct *t)
{
struct i915_sched_engine *sched_engine =
from_tasklet(sched_engine, t, tasklet);
struct intel_engine_cs * const engine = sched_engine->private_data;
/* The driver is wedged; don't process any more events. */
WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
}
static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_sched_engine * const sched_engine = engine->sched_engine;
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
ENGINE_TRACE(engine, "\n");
/*
* Before we call engine->cancel_requests(), we should have exclusive
* access to the submission state. This is arranged for us by the
* caller disabling the interrupt generation, the tasklet and other
* threads that may then access the same state, giving us a free hand
* to reset state. However, we still need to let lockdep be aware that
* we know this state may be accessed in hardirq context, so we
* disable the irq around this manipulation and we want to keep
* the spinlock focused on its duties and not accidentally conflate
* coverage to the submission's irq state. (Similarly, although we
* shouldn't need to disable irq around the manipulation of the
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
execlists_reset_csb(engine, true);
rcu_read_lock();
spin_lock_irqsave(&engine->sched_engine->lock, flags);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&sched_engine->queue))) {
struct i915_priolist *p = to_priolist(rb);
priolist_for_each_request_consume(rq, rn, p) {
if (i915_request_mark_eio(rq)) {
__i915_request_submit(rq);
i915_request_put(rq);
}
}
rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
/* On-hold requests will be flushed to timeline upon their release */
list_for_each_entry(rq, &sched_engine->hold, sched.link)
i915_request_put(i915_request_mark_eio(rq));
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
spin_lock(&ve->base.sched_engine->lock);
rq = fetch_and_zero(&ve->request);
if (rq) {
if (i915_request_mark_eio(rq)) {
rq->engine = engine;
__i915_request_submit(rq);
i915_request_put(rq);
}
i915_request_put(rq);
ve->base.sched_engine->queue_priority_hint = INT_MIN;
}
spin_unlock(&ve->base.sched_engine->lock);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
sched_engine->queue_priority_hint = INT_MIN;
sched_engine->queue = RB_ROOT_CACHED;
GEM_BUG_ON(__tasklet_is_enabled(&engine->sched_engine->tasklet));
engine->sched_engine->tasklet.callback = nop_submission_tasklet;
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
rcu_read_unlock();
}
static void execlists_reset_finish(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
/*
* After a GPU reset, we may have requests to replay. Do so now while
* we still have the forcewake to be sure that the GPU is not allowed
* to sleep before we restart and reload a context.
*
* If the GPU reset fails, the engine may still be alive with requests
* inflight. We expect those to complete, or for the device to be
* reset as the next level of recovery, and as a final resort we
* will declare the device wedged.
*/
GEM_BUG_ON(!reset_in_progress(engine));
/* And kick in case we missed a new request submission. */
if (__tasklet_enable(&engine->sched_engine->tasklet))
__execlists_kick(execlists);
ENGINE_TRACE(engine, "depth->%d\n",
atomic_read(&engine->sched_engine->tasklet.count));
}
static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR,
~(engine->irq_enable_mask | engine->irq_keep_mask));
ENGINE_POSTING_READ(engine, RING_IMR);
}
static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
}
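/* On parking, stop the timeslice and preemption timers; there is no more work to arbitrate. */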
static void execlists_park(struct intel_engine_cs *engine)
{
cancel_timer(&engine->execlists.timer);
cancel_timer(&engine->execlists.preempt);
}
static void add_to_engine(struct i915_request *rq)
{
lockdep_assert_held(&rq->engine->sched_engine->lock);
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}
static void remove_from_engine(struct i915_request *rq)
{
struct intel_engine_cs *engine, *locked;
/*
* Virtual engines complicate acquiring the engine timeline lock,
* as their rq->engine pointer is not stable until under that
* engine lock. The simple ploy we use is to take the lock then
* check that the rq still belongs to the newly locked engine.
*/
locked = READ_ONCE(rq->engine);
spin_lock_irq(&locked->sched_engine->lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->sched_engine->lock);
spin_lock(&engine->sched_engine->lock);
locked = engine;
}
list_del_init(&rq->sched.link);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
/* Prevent further __await_execution() registering a cb, then flush */
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
spin_unlock_irq(&locked->sched_engine->lock);
i915_request_notify_execute_cb_imm(rq);
}
static bool can_preempt(struct intel_engine_cs *engine)
{
if (GRAPHICS_VER(engine->i915) > 8)
return true;
/* GPGPU on bdw requires extra w/a; not implemented */
return engine->class != RENDER_CLASS;
}
static void kick_execlists(const struct i915_request *rq, int prio)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_sched_engine *sched_engine = engine->sched_engine;
const struct i915_request *inflight;
/*
* We only need to kick the tasklet once for the high priority
* new context we add into the queue.
*/
if (prio <= sched_engine->queue_priority_hint)
return;
rcu_read_lock();
/* Nothing currently active? We're overdue for a submission! */
inflight = execlists_active(&engine->execlists);
if (!inflight)
goto unlock;
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
*/
if (inflight->context == rq->context)
goto unlock;
ENGINE_TRACE(engine,
"bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
prio,
rq->fence.context, rq->fence.seqno,
inflight->fence.context, inflight->fence.seqno,
inflight->sched.attr.priority);
sched_engine->queue_priority_hint = prio;
/*
* Allow preemption of low -> normal -> high, but we do
* not allow low priority tasks to preempt other low priority
* tasks under the impression that latency for low priority
* tasks does not matter (as much as background throughput),
* so kiss.
*/
if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
tasklet_hi_schedule(&sched_engine->tasklet);
unlock:
rcu_read_unlock();
}
static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->sched_engine->schedule = i915_schedule;
engine->sched_engine->kick_backend = kick_execlists;
engine->sched_engine->tasklet.callback = execlists_submission_tasklet;
}
static void execlists_shutdown(struct intel_engine_cs *engine)
{
/* Synchronise with residual timers and any softirq they raise */
del_timer_sync(&engine->execlists.timer);
del_timer_sync(&engine->execlists.preempt);
tasklet_kill(&engine->sched_engine->tasklet);
}
static void execlists_release(struct intel_engine_cs *engine)
{
engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
execlists_shutdown(engine);
intel_engine_cleanup_common(engine);
lrc_fini_wa_ctx(engine);
}
static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine,
ktime_t *now)
{
struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
ktime_t total = stats->total;
/*
* If the engine is executing something at the moment
* add it to the total.
*/
*now = ktime_get();
if (READ_ONCE(stats->active))
total = ktime_add(total, ktime_sub(*now, stats->start));
return total;
}
static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine,
ktime_t *now)
{
struct intel_engine_execlists_stats *stats = &engine->stats.execlists;
unsigned int seq;
ktime_t total;
do {
seq = read_seqcount_begin(&stats->lock);
total = __execlists_engine_busyness(engine, now);
} while (read_seqcount_retry(&stats->lock, seq));
return total;
}
static void
logical_ring_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overridden by each engine. */
engine->resume = execlists_resume;
engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
engine->add_active_request = add_to_engine;
engine->remove_active_request = remove_from_engine;
engine->reset.prepare = execlists_reset_prepare;
engine->reset.rewind = execlists_reset_rewind;
engine->reset.cancel = execlists_reset_cancel;
engine->reset.finish = execlists_reset_finish;
engine->park = execlists_park;
engine->unpark = NULL;
engine->emit_flush = gen8_emit_flush_xcs;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
if (GRAPHICS_VER(engine->i915) >= 12) {
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
engine->emit_flush = gen12_emit_flush_xcs;
}
engine->set_default_submission = execlists_set_default_submission;
if (GRAPHICS_VER(engine->i915) < 11) {
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
} else {
/*
* TODO: On Gen11 interrupt masks need to be clear
* to allow C6 entry. Keep interrupts enabled and
* take the hit of generating extra interrupts
* until a more refined solution exists.
*/
}
intel_engine_set_irq_handler(engine, execlists_irq_handler);
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
if (!intel_vgpu_active(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
if (can_preempt(engine)) {
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
if (CONFIG_DRM_I915_TIMESLICE_DURATION)
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
}
}
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
if (intel_engine_has_preemption(engine))
engine->emit_bb_start = xehp_emit_bb_start;
else
engine->emit_bb_start = xehp_emit_bb_start_noarb;
} else {
if (intel_engine_has_preemption(engine))
engine->emit_bb_start = gen8_emit_bb_start;
else
engine->emit_bb_start = gen8_emit_bb_start_noarb;
}
engine->busyness = execlists_engine_busyness;
}
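/*
* Prior to gen11 the engine interrupt bits share GT IIR registers and
* so need a per-engine shift; gen11+ uses per-engine interrupt
* registers and no shift is required.
*/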
static void logical_ring_default_irqs(struct intel_engine_cs *engine)
{
unsigned int shift = 0;
if (GRAPHICS_VER(engine->i915) < 11) {
const u8 irq_shifts[] = {
[RCS0] = GEN8_RCS_IRQ_SHIFT,
[BCS0] = GEN8_BCS_IRQ_SHIFT,
[VCS0] = GEN8_VCS0_IRQ_SHIFT,
[VCS1] = GEN8_VCS1_IRQ_SHIFT,
[VECS0] = GEN8_VECS_IRQ_SHIFT,
};
shift = irq_shifts[engine->id];
}
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
{
switch (GRAPHICS_VER(engine->i915)) {
case 12:
engine->emit_flush = gen12_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
break;
case 11:
engine->emit_flush = gen11_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
break;
default:
engine->emit_flush = gen8_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
break;
}
}
int intel_execlists_submission_setup(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
tasklet_setup(&engine->sched_engine->tasklet, execlists_submission_tasklet);
timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
seqcount_init(&engine->stats.execlists.lock);
if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
rcs_submission_override(engine);
lrc_init_wa_ctx(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = intel_uncore_regs(uncore) +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
execlists->ctrl_reg = intel_uncore_regs(uncore) +
i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
engine->fw_domain = intel_uncore_forcewake_for_reg(engine->uncore,
RING_EXECLIST_CONTROL(engine->mmio_base),
FW_REG_WRITE);
} else {
execlists->submit_reg = intel_uncore_regs(uncore) +
i915_mmio_reg_offset(RING_ELSP(base));
}
execlists->csb_status =
(u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
execlists->csb_write =
&engine->status_page.addr[INTEL_HWS_CSB_WRITE_INDEX(i915)];
if (GRAPHICS_VER(i915) < 11)
execlists->csb_size = GEN8_CSB_ENTRIES;
else
execlists->csb_size = GEN11_CSB_ENTRIES;
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
if (GRAPHICS_VER(engine->i915) >= 11 &&
GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) {
execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
}
/* Finally, take ownership and responsibility for cleanup! */
engine->sanitize = execlists_sanitize;
engine->release = execlists_release;
return 0;
}
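/*
* A virtual engine holds its single queued request on the default
* priolist of its private sched_engine.
*/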
static struct list_head *virtual_queue(struct virtual_engine *ve)
{
return &ve->base.sched_engine->default_priolist.requests;
}
static void rcu_virtual_context_destroy(struct work_struct *wrk)
{
struct virtual_engine *ve =
container_of(wrk, typeof(*ve), rcu.work);
unsigned int n;
GEM_BUG_ON(ve->context.inflight);
/* Preempt-to-busy may leave a stale request behind. */
if (unlikely(ve->request)) {
struct i915_request *old;
spin_lock_irq(&ve->base.sched_engine->lock);
old = fetch_and_zero(&ve->request);
if (old) {
GEM_BUG_ON(!__i915_request_is_complete(old));
__i915_request_submit(old);
i915_request_put(old);
}
spin_unlock_irq(&ve->base.sched_engine->lock);
}
/*
* Flush the tasklet in case it is still running on another core.
*
* This needs to be done before we remove ourselves from the siblings'
* rbtrees as in the case it is running in parallel, it may reinsert
* the rb_node into a sibling.
*/
tasklet_kill(&ve->base.sched_engine->tasklet);
/* Decouple ourselves from the siblings, no more access allowed. */
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = ve->siblings[n];
struct rb_node *node = &ve->nodes[sibling->id].rb;
if (RB_EMPTY_NODE(node))
continue;
spin_lock_irq(&sibling->sched_engine->lock);
/* Detachment is lazily performed in the sched_engine->tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
spin_unlock_irq(&sibling->sched_engine->lock);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet));
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
lrc_fini(&ve->context);
intel_context_fini(&ve->context);
if (ve->base.breadcrumbs)
intel_breadcrumbs_put(ve->base.breadcrumbs);
if (ve->base.sched_engine)
i915_sched_engine_put(ve->base.sched_engine);
intel_engine_free_request_pool(&ve->base);
kfree(ve);
}
static void virtual_context_destroy(struct kref *kref)
{
struct virtual_engine *ve =
container_of(kref, typeof(*ve), context.ref);
GEM_BUG_ON(!list_empty(&ve->context.signals));
/*
* When destroying the virtual engine, we have to be aware that
* it may still be in use from a hardirq/softirq context causing
* the resubmission of a completed request (background completion
* due to preempt-to-busy). Before we can free the engine, we need
* to flush the submission code and tasklets that are still potentially
* accessing the engine. Flushing the tasklets requires process context,
* and since we can guard the resubmit onto the engine with an RCU read
* lock, we can delegate the free of the engine to an RCU worker.
*/
INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
queue_rcu_work(ve->context.engine->i915->unordered_wq, &ve->rcu);
}
static void virtual_engine_initial_hint(struct virtual_engine *ve)
{
int swp;
/*
* Pick a random sibling on starting to help spread the load around.
*
* New contexts are typically created with exactly the same order
* of siblings, and often started in batches. Due to the way we iterate
* the array of sibling when submitting requests, sibling[0] is
* prioritised for dequeuing. If we make sure that sibling[0] is fairly
* randomised across the system, we also help spread the load by the
* first engine we inspect being different each time.
*
* NB This does not force us to execute on this engine, it will just
* typically be the first we inspect for submission.
*/
swp = get_random_u32_below(ve->num_siblings);
if (swp)
swap(ve->siblings[swp], ve->siblings[0]);
}
static int virtual_context_alloc(struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
return lrc_alloc(ce, ve->siblings[0]);
}
static int virtual_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
/* Note: we must use a real engine class for setting up reg state */
return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
}
static int virtual_context_pin(struct intel_context *ce, void *vaddr)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
return lrc_pin(ce, ve->siblings[0], vaddr);
}
static void virtual_context_enter(struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
unsigned int n;
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_get(ve->siblings[n]);
intel_timeline_enter(ce->timeline);
}
static void virtual_context_exit(struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
unsigned int n;
intel_timeline_exit(ce->timeline);
for (n = 0; n < ve->num_siblings; n++)
intel_engine_pm_put(ve->siblings[n]);
}
static struct intel_engine_cs *
virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
{
struct virtual_engine *ve = to_virtual_engine(engine);
if (sibling >= ve->num_siblings)
return NULL;
return ve->siblings[sibling];
}
static const struct intel_context_ops virtual_context_ops = {
.flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
.alloc = virtual_context_alloc,
.cancel_request = execlists_context_cancel_request,
.pre_pin = virtual_context_pre_pin,
.pin = virtual_context_pin,
.unpin = lrc_unpin,
.post_unpin = lrc_post_unpin,
.enter = virtual_context_enter,
.exit = virtual_context_exit,
.destroy = virtual_context_destroy,
.get_sibling = virtual_get_sibling,
};
static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
{
struct i915_request *rq;
intel_engine_mask_t mask;
rq = READ_ONCE(ve->request);
if (!rq)
return 0;
/* The rq is ready for submission; rq->execution_mask is now stable. */
mask = rq->execution_mask;
if (unlikely(!mask)) {
/* Invalid selection, submit to a random engine in error */
i915_request_set_error_once(rq, -ENODEV);
mask = ve->siblings[0]->mask;
}
ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
rq->fence.context, rq->fence.seqno,
mask, ve->base.sched_engine->queue_priority_hint);
return mask;
}
static void virtual_submission_tasklet(struct tasklet_struct *t)
{
struct i915_sched_engine *sched_engine =
from_tasklet(sched_engine, t, tasklet);
struct virtual_engine * const ve =
(struct virtual_engine *)sched_engine->private_data;
const int prio = READ_ONCE(sched_engine->queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
rcu_read_lock();
mask = virtual_submission_mask(ve);
rcu_read_unlock();
if (unlikely(!mask))
return;
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
struct ve_node * const node = &ve->nodes[sibling->id];
struct rb_node **parent, *rb;
bool first;
if (!READ_ONCE(ve->request))
break; /* already handled by a sibling's tasklet */
spin_lock_irq(&sibling->sched_engine->lock);
if (unlikely(!(mask & sibling->mask))) {
if (!RB_EMPTY_NODE(&node->rb)) {
rb_erase_cached(&node->rb,
&sibling->execlists.virtual);
RB_CLEAR_NODE(&node->rb);
}
goto unlock_engine;
}
if (unlikely(!RB_EMPTY_NODE(&node->rb))) {
/*
* Cheat and avoid rebalancing the tree if we can
* reuse this node in situ.
*/
first = rb_first_cached(&sibling->execlists.virtual) ==
&node->rb;
if (prio == node->prio || (prio > node->prio && first))
goto submit_engine;
rb_erase_cached(&node->rb, &sibling->execlists.virtual);
}
rb = NULL;
first = true;
parent = &sibling->execlists.virtual.rb_root.rb_node;
while (*parent) {
struct ve_node *other;
rb = *parent;
other = rb_entry(rb, typeof(*other), rb);
if (prio > other->prio) {
parent = &rb->rb_left;
} else {
parent = &rb->rb_right;
first = false;
}
}
rb_link_node(&node->rb, rb, parent);
rb_insert_color_cached(&node->rb,
&sibling->execlists.virtual,
first);
submit_engine:
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
node->prio = prio;
if (first && prio > sibling->sched_engine->queue_priority_hint)
tasklet_hi_schedule(&sibling->sched_engine->tasklet);
unlock_engine:
spin_unlock_irq(&sibling->sched_engine->lock);
if (intel_context_inflight(&ve->context))
break;
}
}
static void virtual_submit_request(struct i915_request *rq)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
unsigned long flags;
ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
rq->fence.context,
rq->fence.seqno);
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
spin_lock_irqsave(&ve->base.sched_engine->lock, flags);
/* By the time we resubmit a request, it may be completed */
if (__i915_request_is_complete(rq)) {
__i915_request_submit(rq);
goto unlock;
}
if (ve->request) { /* background completion from preempt-to-busy */
GEM_BUG_ON(!__i915_request_is_complete(ve->request));
__i915_request_submit(ve->request);
i915_request_put(ve->request);
}
ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
ve->request = i915_request_get(rq);
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
list_move_tail(&rq->sched.link, virtual_queue(ve));
tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
unlock:
spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
}
static struct intel_context *
execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned long flags)
{
struct drm_i915_private *i915 = siblings[0]->i915;
struct virtual_engine *ve;
unsigned int n;
int err;
ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
if (!ve)
return ERR_PTR(-ENOMEM);
ve->base.i915 = i915;
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
/*
* The decision on whether to submit a request using semaphores
* depends on the saturated state of the engine. We only compute
* this during HW submission of the request, and we need this
* state to be globally applied to all requests being submitted
* to this engine. Virtual engines encompass more than one physical
* engine and so we cannot accurately tell in advance if one of those
* engines is already saturated and so cannot afford to use a semaphore
* and be pessimized in priority for doing so -- if we are the only
* context using semaphores after all other clients have stopped, we
* will be starved on the saturated system. Such a global switch for
* semaphores is less than ideal, but alas is the current compromise.
*/
ve->base.saturated = ALL_ENGINES;
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
intel_engine_init_execlists(&ve->base);
ve->base.sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
if (!ve->base.sched_engine) {
err = -ENOMEM;
goto err_put;
}
ve->base.sched_engine->private_data = &ve->base;
ve->base.cops = &virtual_context_ops;
ve->base.request_alloc = execlists_request_alloc;
ve->base.sched_engine->schedule = i915_schedule;
ve->base.sched_engine->kick_backend = kick_execlists;
ve->base.submit_request = virtual_submit_request;
INIT_LIST_HEAD(virtual_queue(ve));
tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet);
intel_context_init(&ve->context, &ve->base);
ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
if (!ve->base.breadcrumbs) {
err = -ENOMEM;
goto err_put;
}
for (n = 0; n < count; n++) {
struct intel_engine_cs *sibling = siblings[n];
GEM_BUG_ON(!is_power_of_2(sibling->mask));
if (sibling->mask & ve->base.mask) {
drm_dbg(&i915->drm,
"duplicate %s entry in load balancer\n",
sibling->name);
err = -EINVAL;
goto err_put;
}
/*
* The virtual engine implementation is tightly coupled to
* the execlists backend -- we push requests directly
* into a tree inside each physical engine. We could support
* layering if we handle cloning of the requests and
* submitting a copy into each backend.
*/
if (sibling->sched_engine->tasklet.callback !=
execlists_submission_tasklet) {
err = -ENODEV;
goto err_put;
}
GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);
ve->siblings[ve->num_siblings++] = sibling;
ve->base.mask |= sibling->mask;
ve->base.logical_mask |= sibling->logical_mask;
/*
* All physical engines must be compatible for their emission
* functions (as we build the instructions during request
* construction and do not alter them before submission
* on the physical engine). We use the engine class as a guide
* here, although that could be refined.
*/
if (ve->base.class != OTHER_CLASS) {
if (ve->base.class != sibling->class) {
drm_dbg(&i915->drm,
"invalid mixing of engine class, sibling %d, already %d\n",
sibling->class, ve->base.class);
err = -EINVAL;
goto err_put;
}
continue;
}
ve->base.class = sibling->class;
ve->base.uabi_class = sibling->uabi_class;
snprintf(ve->base.name, sizeof(ve->base.name),
"v%dx%d", ve->base.class, count);
ve->base.context_size = sibling->context_size;
ve->base.add_active_request = sibling->add_active_request;
ve->base.remove_active_request = sibling->remove_active_request;
ve->base.emit_bb_start = sibling->emit_bb_start;
ve->base.emit_flush = sibling->emit_flush;
ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
ve->base.emit_fini_breadcrumb_dw =
sibling->emit_fini_breadcrumb_dw;
ve->base.flags = sibling->flags;
}
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
virtual_engine_initial_hint(ve);
return &ve->context;
err_put:
intel_context_put(&ve->context);
return ERR_PTR(err);
}
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
const struct i915_request *rq,
const char *prefix,
int indent),
unsigned int max)
{
const struct intel_engine_execlists *execlists = &engine->execlists;
struct i915_sched_engine *sched_engine = engine->sched_engine;
struct i915_request *rq, *last;
unsigned long flags;
unsigned int count;
struct rb_node *rb;
spin_lock_irqsave(&sched_engine->lock, flags);
last = NULL;
count = 0;
list_for_each_entry(rq, &sched_engine->requests, sched.link) {
if (count++ < max - 1)
show_request(m, rq, "\t\t", 0);
else
last = rq;
}
if (last) {
if (count > max) {
drm_printf(m,
"\t\t...skipping %d executing requests...\n",
count - max);
}
show_request(m, last, "\t\t", 0);
}
if (sched_engine->queue_priority_hint != INT_MIN)
drm_printf(m, "\t\tQueue priority hint: %d\n",
READ_ONCE(sched_engine->queue_priority_hint));
last = NULL;
count = 0;
for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
priolist_for_each_request(rq, p) {
if (count++ < max - 1)
show_request(m, rq, "\t\t", 0);
else
last = rq;
}
}
if (last) {
if (count > max) {
drm_printf(m,
"\t\t...skipping %d queued requests...\n",
count - max);
}
show_request(m, last, "\t\t", 0);
}
last = NULL;
count = 0;
for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
struct i915_request *rq = READ_ONCE(ve->request);
if (rq) {
if (count++ < max - 1)
show_request(m, rq, "\t\t", 0);
else
last = rq;
}
}
if (last) {
if (count > max) {
drm_printf(m,
"\t\t...skipping %d virtual requests...\n",
count - max);
}
show_request(m, last, "\t\t", 0);
}
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
{
unsigned long flags;
spin_lock_irqsave(&engine->sched_engine->lock, flags);
intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
drm_printf(m, "\tOn hold?: %zu\n",
list_count_nodes(&engine->sched_engine->hold));
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_execlists.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_execlists_submission.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
static u32 read_reference_ts_freq(struct intel_uncore *uncore)
{
u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
u32 base_freq, frac_freq;
base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
base_freq *= 1000000;
frac_freq = ((ts_override &
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
frac_freq = 1000000 / (frac_freq + 1);
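/*
 * Illustrative example (made-up field values): a divider field of 23
 * gives base_freq = 24 MHz, and a denominator field of 3 adds
 * 1000000 / 4 = 250 kHz, for a reported 24.25 MHz reference.
 */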
return base_freq + frac_freq;
}
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 rpm_config_reg)
{
u32 f19_2_mhz = 19200000;
u32 f24_mhz = 24000000;
u32 f25_mhz = 25000000;
u32 f38_4_mhz = 38400000;
u32 crystal_clock =
(rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
switch (crystal_clock) {
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
return f24_mhz;
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
return f19_2_mhz;
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
return f38_4_mhz;
case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
return f25_mhz;
default:
MISSING_CASE(crystal_clock);
return 0;
}
}
static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
{
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
/*
* Note that on gen11+, the clock frequency may be reconfigured.
* We do not, and we assume nobody else does.
*
* First figure out the reference frequency. There are 2 ways
* we can compute the frequency, either through the
* TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
* tells us which one we should use.
*/
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(uncore);
} else {
u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
freq = gen11_get_crystal_clock_freq(uncore, c0);
/*
* Now figure out how the command stream's timestamp
* register increments from this frequency (it might
* increment only every few clock cycles).
*/
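/*
 * Illustrative (not from the spec): with a 38.4 MHz crystal, a CTC
 * shift parameter of 3 leaves the timestamp at the full 38.4 MHz,
 * while a shift parameter of 0 divides it by 8 (38.4 MHz -> 4.8 MHz),
 * as freq >>= 3 - shift below.
 */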
freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
}
return freq;
}
static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
{
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(uncore);
} else {
freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
/*
* Now figure out how the command stream's timestamp
* register increments from this frequency (it might
* increment only every few clock cycles).
*/
freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
CTC_SHIFT_PARAMETER_SHIFT);
}
return freq;
}
static u32 gen6_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* PRMs say:
*
* "The PCU TSC counts 10ns increments; this timestamp
* reflects bits 38:3 of the TSC (i.e. 80ns granularity,
* rolling over every 1.5 hours)."
*/
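/* 80 ns per timestamp tick -> 1 / 80 ns = 12.5 MHz */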
return 12500000;
}
static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* 63:32 increments every 1000 ns
* 31:0 mbz
*/
return 1000000000 / 1000;
}
static u32 g4x_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* 63:20 increments every 1/4 ns
* 19:0 mbz
*
* -> 63:32 increments every 1024 ns
*/
return 1000000000 / 1024;
}
static u32 gen4_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* PRMs say:
*
* "The value in this register increments once every 16
* hclks." (through the “Clocking Configuration”
* (“CLKCFG”) MCHBAR register)
*
* Testing on actual hardware has shown there is no /16.
*/
return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000;
}
static u32 read_clock_frequency(struct intel_uncore *uncore)
{
if (GRAPHICS_VER(uncore->i915) >= 11)
return gen11_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) >= 9)
return gen9_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) >= 6)
return gen6_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) == 5)
return gen5_read_clock_frequency(uncore);
else if (IS_G4X(uncore->i915))
return g4x_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) == 4)
return gen4_read_clock_frequency(uncore);
else
return 0;
}
void intel_gt_init_clock_frequency(struct intel_gt *gt)
{
gt->clock_frequency = read_clock_frequency(gt->uncore);
/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
if (GRAPHICS_VER(gt->i915) == 11)
gt->clock_period_ns = NSEC_PER_SEC / 13750000;
else if (gt->clock_frequency)
gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
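/*
 * Illustrative numbers: a 19.2 MHz GT clock gives a ~52 ns period,
 * so a signed 32-bit timestamp delta wraps after roughly 112 s.
 */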
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
gt->clock_frequency / 1000,
gt->clock_period_ns,
div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
USEC_PER_SEC));
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
gt_err(gt, "GT clock frequency changed, was %uHz, now %uHz!\n",
gt->clock_frequency,
read_clock_frequency(gt->uncore));
}
}
#endif
static u64 div_u64_roundup(u64 nom, u32 den)
{
return div_u64(nom + den - 1, den);
}
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
return intel_gt_clock_interval_to_ns(gt, 16 * count);
}
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
u64 val;
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
* 8300) freezing up around GPU hangs. Looks as if even
* scheduling/timer interrupts start misbehaving if the RPS
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
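/*
 * Worked example (illustrative): at a 12.5 MHz GT clock, 1 ms is
 * 12500 clock ticks, 782 PM units after dividing by 16, which gen6
 * then rounds up to 800 (the next multiple of 25).
 */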
val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
if (GRAPHICS_VER(gt->i915) == 6)
val = div_u64_roundup(val, 25) * 25;
return val;
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/pm_runtime.h>
#include <linux/string_helpers.h>
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
/**
* DOC: RC6
*
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while in this stage. This
* stage is entered automatically when the GPU is idle and RC6 support is
* enabled, and as soon as a new workload arises the GPU wakes up
* automatically as well.
*
* There are different RC6 modes available on Intel GPUs, which differ
* from each other in the latency required to enter and leave RC6 and in
* the voltage consumed by the GPU in the different states.
*
* The combination of the following flags define which states GPU is allowed
* to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
* RC6pp is deepest RC6. Their support by hardware varies according to the
* GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
* which brings the most power savings; deeper states save more power, but
* require higher latency to switch to and wake up.
*/
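/*
 * For example (illustrative only), a platform advertising both RC6 and RC6p
 * ends up with GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_RC6p_ENABLE in its
 * control mask, as gen6_rc6_enable() below builds from HAS_RC6p()/HAS_RC6pp().
 */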
static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6)
{
return container_of(rc6, struct intel_gt, rc6);
}
static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc)
{
return rc6_to_gt(rc)->uncore;
}
static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc)
{
return rc6_to_gt(rc)->i915;
}
static void gen11_rc6_enable(struct intel_rc6 *rc6)
{
struct intel_gt *gt = rc6_to_gt(rc6);
struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
u32 pg_enable;
int i;
/*
* With GuCRC, these parameters are set by GuC
*/
if (!intel_uc_uses_guc_rc(>->uc)) {
/* 2b: Program RC6 thresholds.*/
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
intel_uncore_write_fw(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
intel_uncore_write_fw(uncore, GUC_MAX_IDLE_COUNT, 0xA);
intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
}
/*
* 2c: Program Coarse Power Gating Policies.
*
* Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
* use instead is a more conservative estimate for the maximum time
* it takes us to service a CS interrupt and submit a new ELSP - that
* is the time which the GPU is idle waiting for the CPU to select the
* next request to execute. If the idle hysteresis is less than that
* interrupt service latency, the hardware will automatically gate
* the power well and we will then incur the wake up cost on top of
* the service latency. A similar guide from plane_state is that we
* do not want the enable hysteresis to be less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
* service latency, and puts it under 10us for Icelake, similar to
* Broadwell+. To be conservative, we want to factor in a context
* switch on top (due to ksoftirqd).
*/
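/*
 * Assuming the 1280 ns units described above, the value of 60 below
 * corresponds to roughly 77 us of idle hysteresis (gen9 uses 250,
 * i.e. ~320 us).
 */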
intel_uncore_write_fw(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
intel_uncore_write_fw(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
/* 3a: Enable RC6
*
* With GuCRC, we do not enable bit 31 of RC_CTL,
* thus allowing GuC to control RC6 entry/exit fully instead.
* We will not set the HW ENABLE and EI bits
*/
if (!intel_guc_rc_enable(>->uc.guc))
rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
else
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
/*
* Wa_16011777198 and BSpec 52698 - Render powergating must be off.
* FIXME BSpec is outdated; disabling powergating for MTL is just a
* temporary w/a and should be removed after fixing the real cause
* of the forcewake timeouts.
*/
if (IS_METEORLAKE(gt->i915) ||
IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
pg_enable =
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
else
pg_enable =
GEN9_RENDER_PG_ENABLE |
GEN9_MEDIA_PG_ENABLE |
GEN11_MEDIA_SAMPLER_PG_ENABLE;
if (GRAPHICS_VER(gt->i915) >= 12) {
for (i = 0; i < I915_MAX_VCS; i++)
if (HAS_ENGINE(gt, _VCS(i)))
pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
VDN_MFX_POWERGATE_ENABLE(i));
}
intel_uncore_write_fw(uncore, GEN9_PG_ENABLE, pg_enable);
}
static void gen9_rc6_enable(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* 2b: Program RC6 thresholds.*/
if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 11) {
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
intel_uncore_write_fw(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
} else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
/*
* WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
* when CPG is enabled
*/
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
} else {
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
}
intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
intel_uncore_write_fw(uncore, GUC_MAX_IDLE_COUNT, 0xA);
intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
/*
* 2c: Program Coarse Power Gating Policies.
*
* Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
* use instead is a more conservative estimate for the maximum time
* it takes us to service a CS interrupt and submit a new ELSP - that
* is the time which the GPU is idle waiting for the CPU to select the
* next request to execute. If the idle hysteresis is less than that
* interrupt service latency, the hardware will automatically gate
* the power well and we will then incur the wake up cost on top of
* the service latency. A similar guide from plane_state is that we
* do not want the enable hysteresis to be less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
* service latency, and puts it around 10us for Broadwell (and other
* big cores) and around 40us for Broxton (and other low power cores).
* [Note that for legacy ringbuffer submission, this is less than 1us!]
* However, the wakeup latency on Broxton is closer to 100us. To be
* conservative, we have to factor in a context switch on top (due
* to ksoftirqd).
*/
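/*
 * 250 * 1280 ns is ~320 us of idle hysteresis, comfortably above the
 * ~100 us Broxton wakeup latency mentioned above.
 */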
intel_uncore_write_fw(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
intel_uncore_write_fw(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
/* 3a: Enable RC6 */
intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_RC6_ENABLE |
GEN6_RC_CTL_EI_MODE(1);
/*
* WaRsDisableCoarsePowerGating:skl,cnl
* - Render/Media PG need to be disabled with RC6.
*/
if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6)))
intel_uncore_write_fw(uncore, GEN9_PG_ENABLE,
GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
}
static void gen8_rc6_enable(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* 2b: Program RC6 thresholds.*/
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
/* 3: Enable RC6 */
rc6->ctl_enable =
GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_RC6_ENABLE;
}
static void gen6_rc6_enable(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct drm_i915_private *i915 = rc6_to_i915(rc6);
struct intel_engine_cs *engine;
enum intel_engine_id id;
u32 rc6vids, rc6_mask;
int ret;
intel_uncore_write_fw(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
intel_uncore_write_fw(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_engine(engine, rc6_to_gt(rc6), id)
intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
intel_uncore_write_fw(uncore, GEN6_RC1e_THRESHOLD, 1000);
intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 50000);
intel_uncore_write_fw(uncore, GEN6_RC6p_THRESHOLD, 150000);
intel_uncore_write_fw(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
/* We don't use those on Haswell */
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
if (HAS_RC6p(i915))
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
if (HAS_RC6pp(i915))
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
rc6->ctl_enable =
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE;
rc6vids = 0;
ret = snb_pcode_read(rc6_to_gt(rc6)->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
if (GRAPHICS_VER(i915) == 6 && ret) {
drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
} else if (GRAPHICS_VER(i915) == 6 &&
(GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
drm_dbg(&i915->drm,
"You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
ret = snb_pcode_write(rc6_to_gt(rc6)->uncore, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
drm_err(&i915->drm,
"Couldn't fix incorrect rc6 voltage\n");
}
}
/* Check that the pcbr address is not empty. */
static int chv_rc6_init(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct drm_i915_private *i915 = rc6_to_i915(rc6);
resource_size_t pctx_paddr, paddr;
resource_size_t pctx_size = 32 * SZ_1K;
u32 pcbr;
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
paddr = i915->dsm.stolen.end + 1 - pctx_size;
GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & ~4095);
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
}
return 0;
}
static int vlv_rc6_init(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct drm_i915_gem_object *pctx;
resource_size_t pctx_paddr;
resource_size_t pctx_size = 24 * SZ_1K;
u32 pcbr;
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
resource_size_t pcbr_offset;
pcbr_offset = (pcbr & ~4095) - i915->dsm.stolen.start;
pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
pcbr_offset,
pctx_size,
0);
if (IS_ERR(pctx))
return PTR_ERR(pctx);
goto out;
}
drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
* proper allocation within Gfx stolen memory. For example, this
* register should be programmed such that the PCBR range does not
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
pctx = i915_gem_object_create_stolen(i915, pctx_size);
if (IS_ERR(pctx)) {
drm_dbg(&i915->drm,
"not enough stolen space for PCTX, disabling\n");
return PTR_ERR(pctx);
}
GEM_BUG_ON(range_overflows_end_t(u64,
i915->dsm.stolen.start,
pctx->stolen->start,
U32_MAX));
pctx_paddr = i915->dsm.stolen.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
out:
rc6->pctx = pctx;
return 0;
}
static void chv_rc6_enable(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* 2a: Program RC6 thresholds.*/
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_engine(engine, rc6_to_gt(rc6), id)
intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
intel_uncore_write_fw(uncore, GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us (0x186 * 1.28 us) */
intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 0x186);
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
/* 3: Enable RC6 */
rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
}
static void vlv_rc6_enable(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_uncore_write_fw(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
intel_uncore_write_fw(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000);
intel_uncore_write_fw(uncore, GEN6_RC_IDLE_HYSTERSIS, 25);
for_each_engine(engine, rc6_to_gt(rc6), id)
intel_uncore_write_fw(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
intel_uncore_write_fw(uncore, GEN6_RC6_THRESHOLD, 0x557);
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
VLV_MEDIA_RC0_COUNT_EN |
VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
rc6->ctl_enable =
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
}
bool intel_check_bios_c6_setup(struct intel_rc6 *rc6)
{
if (!rc6->bios_state_captured) {
struct intel_uncore *uncore = rc6_to_uncore(rc6);
intel_wakeref_t wakeref;
with_intel_runtime_pm(uncore->rpm, wakeref)
rc6->bios_rc_state = intel_uncore_read(uncore, GEN6_RC_STATE);
rc6->bios_state_captured = true;
}
return rc6->bios_rc_state & RC_SW_TARGET_STATE_MASK;
}
static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct drm_i915_private *i915 = rc6_to_i915(rc6);
u32 rc6_ctx_base, rc_ctl, rc_sw_target;
bool enable_rc6 = true;
rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL);
rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE);
rc_sw_target &= RC_SW_TARGET_STATE_MASK;
rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT;
drm_dbg(&i915->drm, "BIOS enabled RC states: "
"HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
str_on_off(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
str_on_off(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
rc_sw_target);
if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
drm_dbg(&i915->drm, "RC6 Base location not set properly.\n");
enable_rc6 = false;
}
/*
* The exact context size is not known for BXT, so assume a page size
* for this check.
*/
rc6_ctx_base =
intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
if (!(rc6_ctx_base >= i915->dsm.reserved.start &&
rc6_ctx_base + PAGE_SIZE < i915->dsm.reserved.end)) {
drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
enable_rc6 = false;
}
if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT(RENDER_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT(GEN6_BSD_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT(BLT_RING_BASE)) & IDLE_TIME_MASK) > 1 &&
(intel_uncore_read(uncore, PWRCTX_MAXCNT(VEBOX_RING_BASE)) & IDLE_TIME_MASK) > 1)) {
drm_dbg(&i915->drm,
"Engine Idle wait time not set properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) ||
!intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) {
drm_dbg(&i915->drm, "Pushbus not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) {
drm_dbg(&i915->drm, "GFX pause not setup properly.\n");
enable_rc6 = false;
}
if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) {
drm_dbg(&i915->drm, "GPM control not setup properly.\n");
enable_rc6 = false;
}
return enable_rc6;
}
static bool rc6_supported(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
struct intel_gt *gt = rc6_to_gt(rc6);
if (!HAS_RC6(i915))
return false;
if (intel_vgpu_active(i915))
return false;
if (is_mock_gt(rc6_to_gt(rc6)))
return false;
if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) {
drm_notice(&i915->drm,
"RC6 and powersaving disabled by BIOS\n");
return false;
}
if (IS_METEORLAKE(gt->i915) &&
!intel_check_bios_c6_setup(rc6)) {
drm_notice(&i915->drm,
"C6 disabled by BIOS\n");
return false;
}
if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
gt->type == GT_MEDIA) {
drm_notice(&i915->drm,
"Media RC6 disabled on A step\n");
return false;
}
return true;
}
static void rpm_get(struct intel_rc6 *rc6)
{
GEM_BUG_ON(rc6->wakeref);
pm_runtime_get_sync(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = true;
}
static void rpm_put(struct intel_rc6 *rc6)
{
GEM_BUG_ON(!rc6->wakeref);
pm_runtime_put(rc6_to_i915(rc6)->drm.dev);
rc6->wakeref = false;
}
static bool pctx_corrupted(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
return false;
if (intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO))
return false;
drm_notice(&i915->drm,
"RC6 context corruption, disabling runtime power management\n");
return true;
}
static void __intel_rc6_disable(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
struct intel_uncore *uncore = rc6_to_uncore(rc6);
struct intel_gt *gt = rc6_to_gt(rc6);
/* Take control of RC6 back from GuC */
intel_guc_rc_disable(>->uc.guc);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (GRAPHICS_VER(i915) >= 9)
intel_uncore_write_fw(uncore, GEN9_PG_ENABLE, 0);
intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, 0);
intel_uncore_write_fw(uncore, GEN6_RC_STATE, 0);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static void rc6_res_reg_init(struct intel_rc6 *rc6)
{
memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg));
switch (rc6_to_gt(rc6)->type) {
case GT_MEDIA:
rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
break;
default:
rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
break;
}
}
void intel_rc6_init(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
int err;
/* Disable runtime-pm until we can save the GPU state with rc6 pctx */
rpm_get(rc6);
if (!rc6_supported(rc6))
return;
rc6_res_reg_init(rc6);
if (IS_CHERRYVIEW(i915))
err = chv_rc6_init(rc6);
else if (IS_VALLEYVIEW(i915))
err = vlv_rc6_init(rc6);
else
err = 0;
/* Sanitize rc6, ensure it is disabled before we are ready. */
__intel_rc6_disable(rc6);
rc6->supported = err == 0;
}
void intel_rc6_sanitize(struct intel_rc6 *rc6)
{
memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));
if (rc6->enabled) { /* unbalanced suspend/resume */
rpm_get(rc6);
rc6->enabled = false;
}
if (rc6->supported)
__intel_rc6_disable(rc6);
}
void intel_rc6_enable(struct intel_rc6 *rc6)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
struct intel_uncore *uncore = rc6_to_uncore(rc6);
if (!rc6->supported)
return;
GEM_BUG_ON(rc6->enabled);
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (IS_CHERRYVIEW(i915))
chv_rc6_enable(rc6);
else if (IS_VALLEYVIEW(i915))
vlv_rc6_enable(rc6);
else if (GRAPHICS_VER(i915) >= 11)
gen11_rc6_enable(rc6);
else if (GRAPHICS_VER(i915) >= 9)
gen9_rc6_enable(rc6);
else if (IS_BROADWELL(i915))
gen8_rc6_enable(rc6);
else if (GRAPHICS_VER(i915) >= 6)
gen6_rc6_enable(rc6);
rc6->manual = rc6->ctl_enable & GEN6_RC_CTL_RC6_ENABLE;
if (NEEDS_RC6_CTX_CORRUPTION_WA(i915))
rc6->ctl_enable = 0;
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
if (unlikely(pctx_corrupted(rc6)))
return;
/* rc6 is ready, runtime-pm is go! */
rpm_put(rc6);
rc6->enabled = true;
}
void intel_rc6_unpark(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
if (!rc6->enabled)
return;
/* Restore HW timers for automatic RC6 entry while busy */
intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, rc6->ctl_enable);
}
void intel_rc6_park(struct intel_rc6 *rc6)
{
struct intel_uncore *uncore = rc6_to_uncore(rc6);
unsigned int target;
if (!rc6->enabled)
return;
if (unlikely(pctx_corrupted(rc6))) {
intel_rc6_disable(rc6);
return;
}
if (!rc6->manual)
return;
/* Turn off the HW timers and go directly to rc6 */
intel_uncore_write_fw(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE);
if (HAS_RC6pp(rc6_to_i915(rc6)))
target = 0x6; /* deepest rc6 */
else if (HAS_RC6p(rc6_to_i915(rc6)))
target = 0x5; /* deep rc6 */
else
target = 0x4; /* normal rc6 */
intel_uncore_write_fw(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT);
}
void intel_rc6_disable(struct intel_rc6 *rc6)
{
if (!rc6->enabled)
return;
rpm_get(rc6);
rc6->enabled = false;
__intel_rc6_disable(rc6);
}
void intel_rc6_fini(struct intel_rc6 *rc6)
{
struct drm_i915_gem_object *pctx;
struct intel_uncore *uncore = rc6_to_uncore(rc6);
intel_rc6_disable(rc6);
/* We want the BIOS C6 state preserved across loads for MTL */
if (IS_METEORLAKE(rc6_to_i915(rc6)) && rc6->bios_state_captured)
intel_uncore_write_fw(uncore, GEN6_RC_STATE, rc6->bios_rc_state);
pctx = fetch_and_zero(&rc6->pctx);
if (pctx)
i915_gem_object_put(pctx);
if (rc6->wakeref)
rpm_put(rc6);
}
static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
{
u32 lower, upper, tmp;
int loop = 2;
/*
* The registers accessed here do not need forcewake. We borrow
* uncore lock to prevent concurrent access to range reg.
*/
lockdep_assert_held(&uncore->lock);
/*
* vlv and chv residency counters are 40 bits in width.
* With a control bit, we can choose between upper or lower
* 32bit window into this counter.
*
* Although we always use the counter in high-range mode elsewhere,
* userspace may attempt to read the value before rc6 is initialised,
* before we have set the default VLV_COUNTER_CONTROL value. So always
* set the high bit to be safe.
*/
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
do {
tmp = upper;
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
_MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
lower = intel_uncore_read_fw(uncore, reg);
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
} while (upper != tmp && --loop);
/*
* Everywhere else we always use VLV_COUNTER_CONTROL with the
* VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
* now.
*/
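/*
 * The high window exposes bits 39:8 and the low window bits 31:0 of
 * the 40-bit counter, so OR-ing lower with upper << 8 reconstructs
 * the full value; the retry loop above guards against the counter
 * advancing between the two reads.
 */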
return lower | (u64)upper << 8;
}
u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, enum intel_rc6_res_type id)
{
struct drm_i915_private *i915 = rc6_to_i915(rc6);
struct intel_uncore *uncore = rc6_to_uncore(rc6);
u64 time_hw, prev_hw, overflow_hw;
i915_reg_t reg = rc6->res_reg[id];
unsigned int fw_domains;
unsigned long flags;
u32 mul, div;
if (!rc6->supported)
return 0;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
mul = 1000000;
div = i915->czclk_freq;
overflow_hw = BIT_ULL(40);
time_hw = vlv_residency_raw(uncore, reg);
} else {
/* 833.33ns units on Gen9LP, 1.28us elsewhere. */
if (IS_GEN9_LP(i915)) {
mul = 10000;
div = 12;
} else {
mul = 1280;
div = 1;
}
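/* i.e. 10000 / 12 ns = 833.33 ns or 1280 ns per counter tick */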
overflow_hw = BIT_ULL(32);
time_hw = intel_uncore_read_fw(uncore, reg);
}
/*
* Counter wrap handling.
*
* Store previous hw counter values for counter wrap-around handling,
* but this relies on a sufficient frequency of queries; otherwise the
* counters can still wrap.
*/
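/*
 * For example (illustrative), prev_hw = 0xffffff00 and a new 32-bit
 * reading of 0x10 yields a delta of 0x110 via the wrap correction
 * below.
 */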
prev_hw = rc6->prev_hw_residency[id];
rc6->prev_hw_residency[id] = time_hw;
/* RC6 delta from last sample. */
if (time_hw >= prev_hw)
time_hw -= prev_hw;
else
time_hw += overflow_hw - prev_hw;
/* Add delta to RC6 extended raw driver copy. */
time_hw += rc6->cur_residency[id];
rc6->cur_residency[id] = time_hw;
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irqrestore(&uncore->lock, flags);
return mul_u64_u32_div(time_hw, mul, div);
}
u64 intel_rc6_residency_us(struct intel_rc6 *rc6, enum intel_rc6_res_type id)
{
return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, id), 1000);
}
void intel_rc6_print_residency(struct seq_file *m, const char *title,
enum intel_rc6_res_type id)
{
struct intel_gt *gt = m->private;
i915_reg_t reg = gt->rc6.res_reg[id];
intel_wakeref_t wakeref;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
seq_printf(m, "%s %u (%llu us)\n", title,
intel_uncore_read(gt->uncore, reg),
intel_rc6_residency_us(>->rc6, id));
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rc6.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_rc6.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/llist.h>
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "uc/intel_guc_submission.h"
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
struct rb_node *p = i915->uabi_engines.rb_node;
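/* The uabi engine rbtree is keyed by (uabi_class, uabi_instance) */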
while (p) {
struct intel_engine_cs *it =
rb_entry(p, typeof(*it), uabi_node);
if (class < it->uabi_class)
p = p->rb_left;
else if (class > it->uabi_class ||
instance > it->uabi_instance)
p = p->rb_right;
else if (instance < it->uabi_instance)
p = p->rb_left;
else
return it;
}
return NULL;
}
void intel_engine_add_user(struct intel_engine_cs *engine)
{
llist_add((struct llist_node *)&engine->uabi_node,
(struct llist_head *)&engine->i915->uabi_engines);
}
static const u8 uabi_classes[] = {
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
};
static int engine_cmp(void *priv, const struct list_head *A,
const struct list_head *B)
{
const struct intel_engine_cs *a =
container_of((struct rb_node *)A, typeof(*a), uabi_node);
const struct intel_engine_cs *b =
container_of((struct rb_node *)B, typeof(*b), uabi_node);
if (uabi_classes[a->class] < uabi_classes[b->class])
return -1;
if (uabi_classes[a->class] > uabi_classes[b->class])
return 1;
if (a->instance < b->instance)
return -1;
if (a->instance > b->instance)
return 1;
return 0;
}
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
return llist_del_all((struct llist_head *)&i915->uabi_engines);
}
static void sort_engines(struct drm_i915_private *i915,
struct list_head *engines)
{
struct llist_node *pos, *next;
llist_for_each_safe(pos, next, get_engines(i915)) {
struct intel_engine_cs *engine =
container_of((struct rb_node *)pos, typeof(*engine),
uabi_node);
list_add((struct list_head *)&engine->uabi_node, engines);
}
list_sort(NULL, engines, engine_cmp);
}
static void set_scheduler_caps(struct drm_i915_private *i915)
{
static const struct {
u8 engine;
u8 sched;
} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
MAP(HAS_PREEMPTION, PREEMPTION),
MAP(HAS_SEMAPHORES, SEMAPHORES),
MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
};
struct intel_engine_cs *engine;
u32 enabled, disabled;
enabled = 0;
disabled = 0;
for_each_uabi_engine(engine, i915) { /* all engines must agree! */
int i;
if (engine->sched_engine->schedule)
enabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
else
disabled |= (I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY);
if (intel_uc_uses_guc_submission(&engine->gt->uc))
enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
for (i = 0; i < ARRAY_SIZE(map); i++) {
if (engine->flags & BIT(map[i].engine))
enabled |= BIT(map[i].sched);
else
disabled |= BIT(map[i].sched);
}
}
i915->caps.scheduler = enabled & ~disabled;
if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
i915->caps.scheduler = 0;
}
const char *intel_engine_class_repr(u8 class)
{
static const char * const uabi_names[] = {
[RENDER_CLASS] = "rcs",
[COPY_ENGINE_CLASS] = "bcs",
[VIDEO_DECODE_CLASS] = "vcs",
[VIDEO_ENHANCEMENT_CLASS] = "vecs",
[OTHER_CLASS] = "other",
[COMPUTE_CLASS] = "ccs",
};
if (class >= ARRAY_SIZE(uabi_names) || !uabi_names[class])
return "xxx";
return uabi_names[class];
}
struct legacy_ring {
struct intel_gt *gt;
u8 class;
u8 instance;
};
static int legacy_ring_idx(const struct legacy_ring *ring)
{
static const struct {
u8 base, max;
} map[] = {
[RENDER_CLASS] = { RCS0, 1 },
[COPY_ENGINE_CLASS] = { BCS0, 1 },
[VIDEO_DECODE_CLASS] = { VCS0, I915_MAX_VCS },
[VIDEO_ENHANCEMENT_CLASS] = { VECS0, I915_MAX_VECS },
[COMPUTE_CLASS] = { CCS0, I915_MAX_CCS },
};
if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map)))
return INVALID_ENGINE;
if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max))
return INVALID_ENGINE;
return map[ring->class].base + ring->instance;
}
static void add_legacy_ring(struct legacy_ring *ring,
struct intel_engine_cs *engine)
{
if (engine->gt != ring->gt || engine->class != ring->class) {
ring->gt = engine->gt;
ring->class = engine->class;
ring->instance = 0;
}
engine->legacy_idx = legacy_ring_idx(ring);
if (engine->legacy_idx != INVALID_ENGINE)
ring->instance++;
}
static void engine_rename(struct intel_engine_cs *engine, const char *name, u16 instance)
{
char old[sizeof(engine->name)];
memcpy(old, engine->name, sizeof(engine->name));
scnprintf(engine->name, sizeof(engine->name), "%s%u", name, instance);
drm_dbg(&engine->i915->drm, "renamed %s to %s\n", old, engine->name);
}
void intel_engines_driver_register(struct drm_i915_private *i915)
{
struct legacy_ring ring = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
LIST_HEAD(engines);
sort_engines(i915, &engines);
prev = NULL;
p = &i915->uabi_engines.rb_node;
list_for_each_safe(it, next, &engines) {
struct intel_engine_cs *engine =
container_of((struct rb_node *)it, typeof(*engine),
uabi_node);
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
/*
* We don't want to expose the GSC engine to the users, but we
* still rename it so it is easier to identify in the debug logs
*/
if (engine->id == GSC0) {
engine_rename(engine, "gsc", 0);
continue;
}
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
GEM_BUG_ON(engine->uabi_class >=
ARRAY_SIZE(i915->engine_uabi_class_count));
engine->uabi_instance =
i915->engine_uabi_class_count[engine->uabi_class]++;
/* Replace the internal name with the final user facing name */
engine_rename(engine,
intel_engine_class_repr(engine->class),
engine->uabi_instance);
rb_link_node(&engine->uabi_node, prev, p);
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
GEM_BUG_ON(intel_engine_lookup_user(i915,
engine->uabi_class,
engine->uabi_instance) != engine);
/* Fix up the mapping to match default execbuf::user_map[] */
add_legacy_ring(&ring, engine);
prev = &engine->uabi_node;
p = &prev->rb_right;
}
if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) &&
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
struct intel_engine_cs *engine;
unsigned int isolation;
int class, inst;
int errors = 0;
for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
engine = intel_engine_lookup_user(i915,
class, inst);
if (!engine) {
pr_err("UABI engine not found for { class:%d, instance:%d }\n",
class, inst);
errors++;
continue;
}
if (engine->uabi_class != class ||
engine->uabi_instance != inst) {
pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n",
engine->name,
engine->uabi_class,
engine->uabi_instance,
class, inst);
errors++;
continue;
}
}
}
/*
* Make sure that classes with multiple engine instances all
* share the same basic configuration.
*/
isolation = intel_engines_has_context_isolation(i915);
for_each_uabi_engine(engine, i915) {
unsigned int bit = BIT(engine->uabi_class);
unsigned int expected = engine->default_state ? bit : 0;
if ((isolation & bit) != expected) {
pr_err("mismatching default context state for class %d on engine %s\n",
engine->uabi_class, engine->name);
errors++;
}
}
if (drm_WARN(&i915->drm, errors,
"Invalid UABI engine mapping found"))
i915->uabi_engines = RB_ROOT;
}
set_scheduler_caps(i915);
}
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
unsigned int which;
which = 0;
for_each_uabi_engine(engine, i915)
if (engine->default_state)
which |= BIT(engine->uabi_class);
return which;
}
| linux-master | drivers/gpu/drm/i915/gt/intel_engine_user.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_regs.h"
static void write_pm_imr(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
u32 mask = gt->pm_imr;
i915_reg_t reg;
if (GRAPHICS_VER(i915) >= 11) {
reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
mask <<= 16; /* pm is in upper half */
} else if (GRAPHICS_VER(i915) >= 8) {
reg = GEN8_GT_IMR(2);
} else {
reg = GEN6_PMIMR;
}
intel_uncore_write(uncore, reg, mask);
}
static void gen6_gt_pm_update_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
u32 new_val;
WARN_ON(enabled_irq_mask & ~interrupt_mask);
lockdep_assert_held(gt->irq_lock);
new_val = gt->pm_imr;
new_val &= ~interrupt_mask;
new_val |= ~enabled_irq_mask & interrupt_mask;
if (new_val != gt->pm_imr) {
gt->pm_imr = new_val;
write_pm_imr(gt);
}
}
void gen6_gt_pm_unmask_irq(struct intel_gt *gt, u32 mask)
{
gen6_gt_pm_update_irq(gt, mask, mask);
}
void gen6_gt_pm_mask_irq(struct intel_gt *gt, u32 mask)
{
gen6_gt_pm_update_irq(gt, mask, 0);
}
void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
{
struct intel_uncore *uncore = gt->uncore;
i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
lockdep_assert_held(gt->irq_lock);
intel_uncore_write(uncore, reg, reset_mask);
intel_uncore_write(uncore, reg, reset_mask);
intel_uncore_posting_read(uncore, reg);
}
static void write_pm_ier(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
u32 mask = gt->pm_ier;
i915_reg_t reg;
if (GRAPHICS_VER(i915) >= 11) {
reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
mask <<= 16; /* pm is in upper half */
} else if (GRAPHICS_VER(i915) >= 8) {
reg = GEN8_GT_IER(2);
} else {
reg = GEN6_PMIER;
}
intel_uncore_write(uncore, reg, mask);
}
void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
{
lockdep_assert_held(gt->irq_lock);
gt->pm_ier |= enable_mask;
write_pm_ier(gt);
gen6_gt_pm_unmask_irq(gt, enable_mask);
}
void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
{
lockdep_assert_held(gt->irq_lock);
gt->pm_ier &= ~disable_mask;
gen6_gt_pm_mask_irq(gt, disable_mask);
write_pm_ier(gt);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2018 Intel Corporation
*/
#include "i915_selftest.h"
#include "selftest_engine.h"
int intel_engine_live_selftests(struct drm_i915_private *i915)
{
static int (* const tests[])(struct intel_gt *) = {
live_engine_pm_selftests,
NULL,
};
struct intel_gt *gt = to_gt(i915);
typeof(*tests) *fn;
for (fn = tests; *fn; fn++) {
int err;
err = (*fn)(gt);
if (err)
return err;
}
return 0;
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_engine.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016-2018 Intel Corporation
*/
#include <drm/drm_cache.h>
#include "gem/i915_gem_internal.h"
#include "i915_active.h"
#include "i915_drv.h"
#include "i915_syncmap.h"
#include "intel_gt.h"
#include "intel_ring.h"
#include "intel_timeline.h"
#define TIMELINE_SEQNO_BYTES 8
static struct i915_vma *hwsp_alloc(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
if (IS_ERR(vma))
i915_gem_object_put(obj);
return vma;
}
static void __timeline_retire(struct i915_active *active)
{
struct intel_timeline *tl =
container_of(active, typeof(*tl), active);
i915_vma_unpin(tl->hwsp_ggtt);
intel_timeline_put(tl);
}
static int __timeline_active(struct i915_active *active)
{
struct intel_timeline *tl =
container_of(active, typeof(*tl), active);
__i915_vma_pin(tl->hwsp_ggtt);
intel_timeline_get(tl);
return 0;
}
I915_SELFTEST_EXPORT int
intel_timeline_pin_map(struct intel_timeline *timeline)
{
struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
u32 ofs = offset_in_page(timeline->hwsp_offset);
void *vaddr;
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
timeline->hwsp_map = vaddr;
timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
return 0;
}
static int intel_timeline_init(struct intel_timeline *timeline,
struct intel_gt *gt,
struct i915_vma *hwsp,
unsigned int offset)
{
kref_init(&timeline->kref);
atomic_set(&timeline->pin_count, 0);
timeline->gt = gt;
if (hwsp) {
timeline->hwsp_offset = offset;
timeline->hwsp_ggtt = i915_vma_get(hwsp);
} else {
timeline->has_initial_breadcrumb = true;
hwsp = hwsp_alloc(gt);
if (IS_ERR(hwsp))
return PTR_ERR(hwsp);
timeline->hwsp_ggtt = hwsp;
}
timeline->hwsp_map = NULL;
timeline->hwsp_seqno = (void *)(long)timeline->hwsp_offset;
GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
timeline->fence_context = dma_fence_context_alloc(1);
mutex_init(&timeline->mutex);
INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
i915_active_init(&timeline->active, __timeline_active,
__timeline_retire, 0);
return 0;
}
void intel_gt_init_timelines(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = >->timelines;
spin_lock_init(&timelines->lock);
INIT_LIST_HEAD(&timelines->active_list);
}
static void intel_timeline_fini(struct rcu_head *rcu)
{
struct intel_timeline *timeline =
container_of(rcu, struct intel_timeline, rcu);
if (timeline->hwsp_map)
i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
i915_vma_put(timeline->hwsp_ggtt);
i915_active_fini(&timeline->active);
/*
* A small race exists between intel_gt_retire_requests_timeout and
* intel_timeline_exit which could result in the syncmap not getting
* free'd. Rather than work too hard to seal this race, simply clean up
* the syncmap on fini.
*/
i915_syncmap_free(&timeline->sync);
kfree(timeline);
}
struct intel_timeline *
__intel_timeline_create(struct intel_gt *gt,
struct i915_vma *global_hwsp,
unsigned int offset)
{
struct intel_timeline *timeline;
int err;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (!timeline)
return ERR_PTR(-ENOMEM);
err = intel_timeline_init(timeline, gt, global_hwsp, offset);
if (err) {
kfree(timeline);
return ERR_PTR(err);
}
return timeline;
}
struct intel_timeline *
intel_timeline_create_from_engine(struct intel_engine_cs *engine,
unsigned int offset)
{
struct i915_vma *hwsp = engine->status_page.vma;
struct intel_timeline *tl;
tl = __intel_timeline_create(engine->gt, hwsp, offset);
if (IS_ERR(tl))
return tl;
/* Borrow a nearby lock; we only create these timelines during init */
mutex_lock(&hwsp->vm->mutex);
list_add_tail(&tl->engine_link, &engine->status_page.timelines);
mutex_unlock(&hwsp->vm->mutex);
return tl;
}
void __intel_timeline_pin(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
atomic_inc(&tl->pin_count);
}
int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
{
int err;
if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
if (!tl->hwsp_map) {
err = intel_timeline_pin_map(tl);
if (err)
return err;
}
err = i915_ggtt_pin(tl->hwsp_ggtt, ww, 0, PIN_HIGH);
if (err)
return err;
tl->hwsp_offset =
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
tl->fence_context, tl->hwsp_offset);
i915_active_acquire(&tl->active);
if (atomic_fetch_inc(&tl->pin_count)) {
i915_active_release(&tl->active);
__i915_vma_unpin(tl->hwsp_ggtt);
}
return 0;
}
void intel_timeline_reset_seqno(const struct intel_timeline *tl)
{
u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno;
/* Must be pinned to be writable, and no requests in flight. */
GEM_BUG_ON(!atomic_read(&tl->pin_count));
memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
WRITE_ONCE(*hwsp_seqno, tl->seqno);
drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}
void intel_timeline_enter(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
/*
* Pretend we are serialised by the timeline->mutex.
*
* While generally true, there are a few exceptions to the rule
* for the engine->kernel_context being used to manage power
* transitions. As the engine_park may be called from under any
* timeline, it uses the power mutex as a global serialisation
* lock to prevent any other request entering its timeline.
*
* The rule is generally tl->mutex, otherwise engine->wakeref.mutex.
*
* However, intel_gt_retire_request() does not know which engine
* it is retiring along and so cannot partake in the engine-pm
* barrier, and there we use the tl->active_count as a means to
* pin the timeline in the active_list while the locks are dropped.
* Ergo, as that is outside of the engine-pm barrier, we need to
* use atomic to manipulate tl->active_count.
*/
lockdep_assert_held(&tl->mutex);
if (atomic_add_unless(&tl->active_count, 1, 0))
return;
spin_lock(&timelines->lock);
if (!atomic_fetch_inc(&tl->active_count)) {
/*
* The HWSP is volatile, and may have been lost while inactive,
* e.g. across suspend/resume. Be paranoid, and ensure that
* the HWSP value matches our seqno so we don't proclaim
* the next request as already complete.
*/
intel_timeline_reset_seqno(tl);
list_add_tail(&tl->link, &timelines->active_list);
}
spin_unlock(&timelines->lock);
}
void intel_timeline_exit(struct intel_timeline *tl)
{
struct intel_gt_timelines *timelines = &tl->gt->timelines;
/* See intel_timeline_enter() */
lockdep_assert_held(&tl->mutex);
GEM_BUG_ON(!atomic_read(&tl->active_count));
if (atomic_add_unless(&tl->active_count, -1, 1))
return;
spin_lock(&timelines->lock);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
spin_unlock(&timelines->lock);
/*
* Since this timeline is idle, all barriers upon which we were waiting
* must also be complete and so we can discard the last used barriers
* without loss of information.
*/
i915_syncmap_free(&tl->sync);
}
static u32 timeline_advance(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
static noinline int
__intel_timeline_get_seqno(struct intel_timeline *tl,
u32 *seqno)
{
u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES);
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
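/* e.g. (illustrative) a next_ofs of 0x20 or 0x60 gets bumped to 0x40
 * or 0x80 respectively, so that bit 5 of the address ends up clear.
 */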
if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5)))
next_ofs = offset_in_page(next_ofs + BIT(5));
tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs;
tl->hwsp_seqno = tl->hwsp_map + next_ofs;
intel_timeline_reset_seqno(tl);
*seqno = timeline_advance(tl);
GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
return 0;
}
int intel_timeline_get_seqno(struct intel_timeline *tl,
struct i915_request *rq,
u32 *seqno)
{
*seqno = timeline_advance(tl);
/* Replace the HWSP on wraparound for HW semaphores */
if (unlikely(!*seqno && tl->has_initial_breadcrumb))
return __intel_timeline_get_seqno(tl, seqno);
return 0;
}
int intel_timeline_read_hwsp(struct i915_request *from,
struct i915_request *to,
u32 *hwsp)
{
struct intel_timeline *tl;
int err;
rcu_read_lock();
tl = rcu_dereference(from->timeline);
if (i915_request_signaled(from) ||
!i915_active_acquire_if_busy(&tl->active))
tl = NULL;
if (tl) {
/* hwsp_offset may wraparound, so use from->hwsp_seqno */
*hwsp = i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(from->hwsp_seqno);
}
/* ensure we wait on the right request, if not, we completed */
if (tl && __i915_request_is_complete(from)) {
i915_active_release(&tl->active);
tl = NULL;
}
rcu_read_unlock();
if (!tl)
return 1;
/* Can't do semaphore waits on kernel context */
if (!tl->has_initial_breadcrumb) {
err = -EINVAL;
goto out;
}
err = i915_active_add_request(&tl->active, to);
out:
i915_active_release(&tl->active);
return err;
}
void intel_timeline_unpin(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
if (!atomic_dec_and_test(&tl->pin_count))
return;
i915_active_release(&tl->active);
__i915_vma_unpin(tl->hwsp_ggtt);
}
void __intel_timeline_free(struct kref *kref)
{
struct intel_timeline *timeline =
container_of(kref, typeof(*timeline), kref);
GEM_BUG_ON(atomic_read(&timeline->pin_count));
GEM_BUG_ON(!list_empty(&timeline->requests));
GEM_BUG_ON(timeline->retire);
call_rcu(&timeline->rcu, intel_timeline_fini);
}
void intel_gt_fini_timelines(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = >->timelines;
GEM_BUG_ON(!list_empty(&timelines->active_list));
}
void intel_gt_show_timelines(struct intel_gt *gt,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
const struct i915_request *rq,
const char *prefix,
int indent))
{
struct intel_gt_timelines *timelines = >->timelines;
struct intel_timeline *tl, *tn;
LIST_HEAD(free);
spin_lock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
unsigned long count, ready, inflight;
struct i915_request *rq, *rn;
struct dma_fence *fence;
if (!mutex_trylock(&tl->mutex)) {
drm_printf(m, "Timeline %llx: busy; skipping\n",
tl->fence_context);
continue;
}
intel_timeline_get(tl);
GEM_BUG_ON(!atomic_read(&tl->active_count));
atomic_inc(&tl->active_count); /* pin the list element */
spin_unlock(&timelines->lock);
count = 0;
ready = 0;
inflight = 0;
list_for_each_entry_safe(rq, rn, &tl->requests, link) {
if (i915_request_completed(rq))
continue;
count++;
if (i915_request_is_ready(rq))
ready++;
if (i915_request_is_active(rq))
inflight++;
}
drm_printf(m, "Timeline %llx: { ", tl->fence_context);
drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
count, ready, inflight);
drm_printf(m, ", seqno: { current: %d, last: %d }",
*tl->hwsp_seqno, tl->seqno);
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
drm_printf(m, ", engine: %s",
to_request(fence)->engine->name);
dma_fence_put(fence);
}
drm_printf(m, " }\n");
if (show_request) {
list_for_each_entry_safe(rq, rn, &tl->requests, link)
show_request(m, rq, "", 2);
}
mutex_unlock(&tl->mutex);
spin_lock(&timelines->lock);
/* Resume list iteration after reacquiring spinlock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
GEM_BUG_ON(atomic_read(&tl->active_count));
list_add(&tl->link, &free);
}
}
spin_unlock(&timelines->lock);
list_for_each_entry_safe(tl, tn, &free, link)
__intel_timeline_free(&tl->kref);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "gt/selftests/mock_timeline.c"
#include "gt/selftest_timeline.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_timeline.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2008-2018 Intel Corporation
*/
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <linux/string_helpers.h>
#include "display/intel_display_reset.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_regs.h"
#include "gt/uc/intel_gsc_fw.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_reset.h"
#include "uc/intel_guc.h"
#define RESET_MAX_RETRIES 3
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
unsigned long prev_hang;
unsigned int score;
if (IS_ERR_OR_NULL(file_priv))
return;
score = 0;
if (banned)
score = I915_CLIENT_SCORE_CONTEXT_BAN;
prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
score += I915_CLIENT_SCORE_HANG_FAST;
if (score) {
atomic_add(score, &file_priv->ban_score);
drm_dbg(&ctx->i915->drm,
"client %s: gained %u ban score, now %u\n",
ctx->name, score,
atomic_read(&file_priv->ban_score));
}
}
static bool mark_guilty(struct i915_request *rq)
{
struct i915_gem_context *ctx;
unsigned long prev_hang;
bool banned;
int i;
if (intel_context_is_closed(rq->context))
return true;
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
if (!ctx)
return intel_context_is_banned(rq->context);
atomic_inc(&ctx->guilty_count);
/* Cool contexts are too cool to be banned! (Used for reset testing.) */
if (!i915_gem_context_is_bannable(ctx)) {
banned = false;
goto out;
}
drm_notice(&ctx->i915->drm,
"%s context reset due to GPU hang\n",
ctx->name);
/* Record the timestamp for the last N hangs */
prev_hang = ctx->hang_timestamp[0];
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
ctx->hang_timestamp[i] = jiffies;
/* If we have hung N+1 times in rapid succession, we ban the context! */
banned = !i915_gem_context_is_recoverable(ctx);
if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
banned = true;
if (banned)
drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
ctx->name, atomic_read(&ctx->guilty_count));
client_mark_guilty(ctx, banned);
out:
i915_gem_context_put(ctx);
return banned;
}
static void mark_innocent(struct i915_request *rq)
{
struct i915_gem_context *ctx;
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
if (ctx)
atomic_inc(&ctx->active_count);
rcu_read_unlock();
}
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
bool banned = false;
RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
GEM_BUG_ON(__i915_request_is_complete(rq));
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
i915_request_set_error_once(rq, -EIO);
__i915_request_skip(rq);
banned = mark_guilty(rq);
} else {
i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
}
rcu_read_unlock();
if (banned)
intel_context_ban(rq->context, rq);
}
static bool i915_in_reset(struct pci_dev *pdev)
{
u8 gdrst;
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return gdrst & GRDOM_RESET_STATUS;
}
static int i915_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
udelay(50);
err = wait_for_atomic(i915_in_reset(pdev), 50);
/* Clear the reset request. */
pci_write_config_byte(pdev, I915_GDRST, 0);
udelay(50);
if (!err)
err = wait_for_atomic(!i915_in_reset(pdev), 50);
return err;
}
static bool g4x_reset_complete(struct pci_dev *pdev)
{
u8 gdrst;
pci_read_config_byte(pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
static int g33_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
static int g4x_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
struct intel_uncore *uncore = gt->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret;
}
static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_uncore *uncore = gt->uncore;
int ret;
intel_uncore_write_fw(uncore, ILK_GDSR,
ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
if (ret) {
GT_TRACE(gt, "Wait for render reset failed\n");
goto out;
}
intel_uncore_write_fw(uncore, ILK_GDSR,
ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
if (ret) {
GT_TRACE(gt, "Wait for media reset failed\n");
goto out;
}
out:
intel_uncore_write_fw(uncore, ILK_GDSR, 0);
intel_uncore_posting_read_fw(uncore, ILK_GDSR);
return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
struct intel_uncore *uncore = gt->uncore;
int loops;
int err;
/*
* On some platforms, e.g. Jasperlake, we see that the engine register
* state is not cleared until shortly after GDRST reports completion,
* causing a failure as we try to immediately resume while the internal
* state is still in flux. If we immediately repeat the reset, the
* second reset appears to serialise with the first, and since it is a
* no-op, the registers should retain their reset value. However, there
* is still a concern that upon leaving the second reset, the internal
* engine state is still in flux and not ready for resuming.
*
* Starting on MTL, resetting some engines involves prep steps that need
* to be applied every time we write to GEN6_GDRST. As those prep steps
* are time consuming (tens of ms), we don't want
* to perform that twice, so, since the Jasperlake issue hasn't been
* observed on MTL, we avoid repeating the reset on newer platforms.
*/
loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;
/*
* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
do {
intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests. */
err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
hw_domain_mask, 0,
2000, 0,
NULL);
} while (err == 0 && --loops);
if (err)
GT_TRACE(gt,
"Wait for 0x%08x engines reset failed\n",
hw_domain_mask);
/*
* As we have observed that the engine state is still volatile
* after GDRST is acked, impose a small delay to let everything settle.
*/
udelay(50);
return err;
}
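/*
 * Callers build hw_domain_mask either as the full soft-reset domain
 * (GEN6_GRDOM_FULL / GEN11_GRDOM_FULL) or as the OR of the per-engine
 * reset_domain bits; a sketch mirroring __gen6_reset_engines() below:
 *
 *	hw_mask = 0;
 *	for_each_engine_masked(engine, gt, engine_mask, tmp)
 *		hw_mask |= engine->reset_domain;
 *	gen6_hw_domain_reset(gt, hw_mask);
 */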
static int __gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
} else {
intel_engine_mask_t tmp;
hw_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
hw_mask |= engine->reset_domain;
}
}
return gen6_hw_domain_reset(gt, hw_mask);
}
static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&gt->uncore->lock, flags);
ret = __gen6_reset_engines(gt, engine_mask, retry);
spin_unlock_irqrestore(&gt->uncore->lock, flags);
return ret;
}
static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
{
int vecs_id;
GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
vecs_id = _VECS((engine->instance) / 2);
return engine->gt->engine[vecs_id];
}
struct sfc_lock_data {
i915_reg_t lock_reg;
i915_reg_t ack_reg;
i915_reg_t usage_reg;
u32 lock_bit;
u32 ack_bit;
u32 usage_bit;
u32 reset_bit;
};
static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
struct sfc_lock_data *sfc_lock)
{
switch (engine->class) {
default:
MISSING_CASE(engine->class);
fallthrough;
case VIDEO_DECODE_CLASS:
sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
break;
case VIDEO_ENHANCEMENT_CLASS:
sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
break;
}
}
static int gen11_lock_sfc(struct intel_engine_cs *engine,
u32 *reset_mask,
u32 *unlock_mask)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
struct sfc_lock_data sfc_lock;
bool lock_obtained, lock_to_other = false;
int ret;
switch (engine->class) {
case VIDEO_DECODE_CLASS:
if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
return 0;
fallthrough;
case VIDEO_ENHANCEMENT_CLASS:
get_sfc_forced_lock_data(engine, &sfc_lock);
break;
default:
return 0;
}
if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
struct intel_engine_cs *paired_vecs;
if (engine->class != VIDEO_DECODE_CLASS ||
GRAPHICS_VER(engine->i915) != 12)
return 0;
/*
* Wa_14010733141
*
* If the VCS-MFX isn't using the SFC, we also need to check
* whether VCS-HCP is using it. If so, we need to issue a *VE*
* forced lock on the VE engine that shares the same SFC.
*/
if (!(intel_uncore_read_fw(uncore,
GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
GEN12_HCP_SFC_USAGE_BIT))
return 0;
paired_vecs = find_sfc_paired_vecs_engine(engine);
get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
lock_to_other = true;
*unlock_mask |= paired_vecs->mask;
} else {
*unlock_mask |= engine->mask;
}
/*
* If the engine is using an SFC, tell the engine that a software reset
* is going to happen. The engine will then try to force lock the SFC.
* If SFC ends up being locked to the engine we want to reset, we have
* to reset it as well (we will unlock it once the reset sequence is
* completed).
*/
intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
ret = __intel_wait_for_register_fw(uncore,
sfc_lock.ack_reg,
sfc_lock.ack_bit,
sfc_lock.ack_bit,
1000, 0, NULL);
/*
* Was the SFC released while we were trying to lock it?
*
* We should reset both the engine and the SFC if:
* - We were locking the SFC to this engine and the lock succeeded
* OR
* - We were locking the SFC to a different engine (Wa_14010733141)
* but the SFC was released before the lock was obtained.
*
* Otherwise we need only reset the engine by itself and we can
* leave the SFC alone.
*/
lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
sfc_lock.usage_bit) != 0;
if (lock_obtained == lock_to_other)
return 0;
if (ret) {
ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
return ret;
}
*reset_mask |= sfc_lock.reset_bit;
return 0;
}
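/*
 * In short: gen11_lock_sfc() may extend the caller's reset mask with the
 * SFC reset bit and records in *unlock_mask which engine had a forced
 * lock attempted; once gen6_hw_domain_reset() completes, the caller is
 * expected to release those locks again via gen11_unlock_sfc(), as
 * __gen11_reset_engines() below does.
 */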
static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
struct sfc_lock_data sfc_lock = {};
if (engine->class != VIDEO_DECODE_CLASS &&
engine->class != VIDEO_ENHANCEMENT_CLASS)
return;
if (engine->class == VIDEO_DECODE_CLASS &&
(BIT(engine->instance) & vdbox_sfc_access) == 0)
return;
get_sfc_forced_lock_data(engine, &sfc_lock);
intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
}
static int __gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
u32 reset_mask, unlock_mask = 0;
int ret;
if (engine_mask == ALL_ENGINES) {
reset_mask = GEN11_GRDOM_FULL;
} else {
reset_mask = 0;
for_each_engine_masked(engine, gt, engine_mask, tmp) {
reset_mask |= engine->reset_domain;
ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
if (ret)
goto sfc_unlock;
}
}
ret = gen6_hw_domain_reset(gt, reset_mask);
sfc_unlock:
/*
* We unlock the SFC based on the lock status and not the result of
* gen11_lock_sfc to make sure that we clean properly if something
* wrong happened during the lock (e.g. lock acquired after timeout
* expiration).
*
* Due to Wa_14010733141, we may have locked an SFC to an engine that
* wasn't being reset. So instead of calling gen11_unlock_sfc()
* on engine_mask, we instead call it on the mask of engines that our
* gen11_lock_sfc() calls told us actually had locks attempted.
*/
for_each_engine_masked(engine, gt, unlock_mask, tmp)
gen11_unlock_sfc(engine);
return ret;
}
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
struct intel_uncore *uncore = engine->uncore;
const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
u32 request, mask, ack;
int ret;
if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
return -ETIMEDOUT;
ack = intel_uncore_read_fw(uncore, reg);
if (ack & RESET_CTL_CAT_ERROR) {
/*
* For catastrophic errors, ready-for-reset sequence
* needs to be bypassed: HAS#396813
*/
request = RESET_CTL_CAT_ERROR;
mask = RESET_CTL_CAT_ERROR;
/* Catastrophic errors need to be cleared by HW */
ack = 0;
} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
request = RESET_CTL_REQUEST_RESET;
mask = RESET_CTL_READY_TO_RESET;
ack = RESET_CTL_READY_TO_RESET;
} else {
return 0;
}
intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
drm_err(&engine->i915->drm,
"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
engine->name, request,
intel_uncore_read_fw(uncore, reg));
return ret;
}
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
intel_uncore_write_fw(engine->uncore,
RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
intel_engine_mask_t tmp;
unsigned long flags;
int ret;
spin_lock_irqsave(&gt->uncore->lock, flags);
for_each_engine_masked(engine, gt, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
/*
* If this is not the first failed attempt to prepare,
* we decide to proceed anyway.
*
* By doing so we risk context corruption and with
* some gens (kbl), possible system hang if reset
* happens during active bb execution.
*
* We'd rather take context corruption than a
* failed reset with a wedged driver/gpu. And the
* active bb execution case should be covered by
* the stop_engines() we have before the reset.
*/
}
/*
* Wa_22011100796:dg2, whenever Full soft reset is required,
* reset all individual engines firstly, and then do a full soft reset.
*
* This is best effort, so ignore any error from the initial reset.
*/
if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
__gen11_reset_engines(gt, gt->info.engine_mask, 0);
if (GRAPHICS_VER(gt->i915) >= 11)
ret = __gen11_reset_engines(gt, engine_mask, retry);
else
ret = __gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
for_each_engine_masked(engine, gt, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
spin_unlock_irqrestore(&gt->uncore->lock, flags);
return ret;
}
static int mock_reset(struct intel_gt *gt,
intel_engine_mask_t mask,
unsigned int retry)
{
return 0;
}
typedef int (*reset_func)(struct intel_gt *,
intel_engine_mask_t engine_mask,
unsigned int retry);
static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
if (is_mock_gt(gt))
return mock_reset;
else if (GRAPHICS_VER(i915) >= 8)
return gen8_reset_engines;
else if (GRAPHICS_VER(i915) >= 6)
return gen6_reset_engines;
else if (GRAPHICS_VER(i915) >= 5)
return ilk_do_reset;
else if (IS_G4X(i915))
return g4x_do_reset;
else if (IS_G33(i915) || IS_PINEVIEW(i915))
return g33_do_reset;
else if (GRAPHICS_VER(i915) >= 3)
return i915_do_reset;
else
return NULL;
}
static int __reset_guc(struct intel_gt *gt)
{
u32 guc_domain =
GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
return gen6_hw_domain_reset(gt, guc_domain);
}
static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
if (!IS_METEORLAKE(gt->i915) || !HAS_ENGINE(gt, GSC0))
return false;
if (!__HAS_ENGINE(engine_mask, GSC0))
return false;
return intel_gsc_uc_fw_init_done(&gt->uc.gsc);
}
static intel_engine_mask_t
wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first)
{
if (!needs_wa_14015076503(gt, engine_mask))
return engine_mask;
/*
* wa_14015076503: if the GSC FW is loaded, we need to alert it that
* we're going to do a GSC engine reset and then wait for 200ms for the
* FW to get ready for it. However, if this is the first ALL_ENGINES
* reset attempt and the GSC is not busy, we can try to instead reset
* the GuC and all the other engines individually to avoid the 200ms
* wait.
* Skipping the GSC engine is safe because, differently from other
* engines, the GSCCS' only role is to forward commands to the GSC
* FW, so it doesn't have any HW outside of the CS itself and therefore
* it has no state that we don't explicitly re-init on resume or on
* context switch (LRC or power context). The HW for the GSC uC is
* managed by the GSC FW, so we don't need to care about that.
*/
if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) {
__reset_guc(gt);
engine_mask = gt->info.engine_mask & ~BIT(GSC0);
} else {
intel_uncore_rmw(gt->uncore,
HECI_H_GS1(MTL_GSC_HECI2_BASE),
0, HECI_H_GS1_ER_PREP);
/* make sure the reset bit is clear when writing the CSR reg */
intel_uncore_rmw(gt->uncore,
HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_RST, HECI_H_CSR_IG);
msleep(200);
}
return engine_mask;
}
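/*
 * Expected pairing (illustrative, mirroring __intel_gt_reset() below):
 *
 *	reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
 *	ret = reset(gt, reset_mask, retry);
 *	wa_14015076503_end(gt, reset_mask);
 */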
static void
wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
if (!needs_wa_14015076503(gt, engine_mask))
return;
intel_uncore_rmw(gt->uncore,
HECI_H_GS1(MTL_GSC_HECI2_BASE),
HECI_H_GS1_ER_PREP, 0);
}
int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
int ret = -ETIMEDOUT;
int retry;
reset = intel_get_gpu_reset(gt);
if (!reset)
return -ENODEV;
/*
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
intel_engine_mask_t reset_mask;
reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
preempt_disable();
ret = reset(gt, reset_mask, retry);
preempt_enable();
wa_14015076503_end(gt, reset_mask);
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
bool intel_has_gpu_reset(const struct intel_gt *gt)
{
if (!gt->i915->params.reset)
return false;
return intel_get_gpu_reset(gt);
}
bool intel_has_reset_engine(const struct intel_gt *gt)
{
if (gt->i915->params.reset < 2)
return false;
return INTEL_INFO(gt->i915)->has_reset_engine;
}
int intel_reset_guc(struct intel_gt *gt)
{
int ret;
GEM_BUG_ON(!HAS_GT_UC(gt->i915));
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
ret = __reset_guc(gt);
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
/*
* Ensure the irq handler finishes, and is not run again.
* Also return the active request so that we only search for it once.
*/
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
/*
* During the reset sequence, we must prevent the engine from
* entering RC6. As the context state is undefined until we restart
* the engine, if it does enter RC6 during the reset, the state
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (engine->reset.prepare)
engine->reset.prepare(engine);
}
static void revoke_mmaps(struct intel_gt *gt)
{
int i;
for (i = 0; i < gt->ggtt->num_fences; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
if (!vma->mmo)
continue;
node = &vma->mmo->vma_node;
vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
}
}
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
/* For GuC mode, ensure submission is disabled before stopping ring */
intel_uc_reset_prepare(&gt->uc);
for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
}
return awake;
}
static void gt_revoke(struct intel_gt *gt)
{
revoke_mmaps(gt);
}
static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
/*
* Everything depends on having the GTT running, so we need to start
* there.
*/
err = i915_ggtt_enable_hw(gt->i915);
if (err)
return err;
local_bh_disable();
for_each_engine(engine, gt, id)
__intel_engine_reset(engine, stalled_mask & engine->mask);
local_bh_enable();
intel_uc_reset(&gt->uc, ALL_ENGINES);
intel_ggtt_restore_fences(gt->ggtt);
return err;
}
static void reset_finish_engine(struct intel_engine_cs *engine)
{
if (engine->reset.finish)
engine->reset.finish(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
intel_engine_signal_breadcrumbs(engine);
}
static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
reset_finish_engine(engine);
if (awake & engine->mask)
intel_engine_pm_put(engine);
}
intel_uc_reset_finish(&gt->uc);
}
static void nop_submit_request(struct i915_request *request)
{
RQ_TRACE(request, "-EIO\n");
request = i915_request_mark_eio(request);
if (request) {
i915_request_submit(request);
intel_engine_signal_breadcrumbs(request->engine);
i915_request_put(request);
}
}
static void __intel_gt_set_wedged(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
intel_engine_mask_t awake;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
GT_TRACE(gt, "start\n");
/*
* First, stop submission to hw, but do not yet complete requests by
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
awake = reset_prepare(gt);
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
__intel_gt_reset(gt, ALL_ENGINES);
for_each_engine(engine, gt, id)
engine->submit_request = nop_submit_request;
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_write_global_seqno, or the one
* in nop_submit_request.
*/
synchronize_rcu_expedited();
set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
local_bh_disable();
for_each_engine(engine, gt, id)
if (engine->reset.cancel)
engine->reset.cancel(engine);
intel_uc_cancel_requests(&gt->uc);
local_bh_enable();
reset_finish(gt, awake);
GT_TRACE(gt, "end\n");
}
void intel_gt_set_wedged(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
mutex_lock(&gt->reset.mutex);
if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_debug_printer(__func__);
struct intel_engine_cs *engine;
enum intel_engine_id id;
drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
for_each_engine(engine, gt, id) {
if (intel_engine_is_idle(engine))
continue;
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
}
__intel_gt_set_wedged(gt);
mutex_unlock(&gt->reset.mutex);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
struct intel_gt_timelines *timelines = &gt->timelines;
struct intel_timeline *tl;
bool ok;
if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
/* Never fully initialised, recovery impossible */
if (intel_gt_has_unrecoverable_error(gt))
return false;
GT_TRACE(gt, "start\n");
/*
* Before unwedging, make sure that all pending operations
* are flushed and errored out - we may have requests waiting upon
* third party fences. We marked all inflight requests as EIO, and
* every execbuf since has returned EIO; for consistency we want all
* the currently pending requests to also be marked as EIO, which
* is done inside our nop_submit_request - and so we must wait.
*
* No more can be submitted until we reset the wedged bit.
*/
spin_lock(&timelines->lock);
list_for_each_entry(tl, &timelines->active_list, link) {
struct dma_fence *fence;
fence = i915_active_fence_get(&tl->last_request);
if (!fence)
continue;
spin_unlock(&timelines->lock);
/*
* All internal dependencies (i915_requests) will have
* been flushed by the set-wedge, but we may be stuck waiting
* for external fences. These should all be capped to 10s
* (I915_FENCE_TIMEOUT) so this wait should not be unbounded
* in the worst case.
*/
dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
dma_fence_put(fence);
/* Restart iteration after dropping the lock */
spin_lock(&timelines->lock);
tl = list_entry(&timelines->active_list, typeof(*tl), link);
}
spin_unlock(&timelines->lock);
/* We must reset pending GPU events before restoring our submission */
ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
if (!ok) {
/*
* Warn CI about the unrecoverable wedged condition.
* Time for a reboot.
*/
add_taint_for_CI(gt->i915, TAINT_WARN);
return false;
}
/*
* Undo nop_submit_request. We prevent all new i915 requests from
* being queued (by disallowing execbuf whilst wedged) so having
* waited for all active requests above, we know the system is idle
* and do not have to worry about a thread being inside
* engine->submit_request() as we swap over. So unlike installing
* the nop_submit_request on reset, we can do this from normal
* context and do not require stop_machine().
*/
intel_engines_reset_default_submission(gt);
GT_TRACE(gt, "end\n");
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &gt->reset.flags);
return true;
}
bool intel_gt_unset_wedged(struct intel_gt *gt)
{
bool result;
mutex_lock(&gt->reset.mutex);
result = __intel_gt_unset_wedged(gt);
mutex_unlock(&gt->reset.mutex);
return result;
}
static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
err = __intel_gt_reset(gt, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
err = __intel_gt_reset(gt, ALL_ENGINES);
}
if (err)
return err;
return gt_reset(gt, stalled_mask);
}
static int resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
for_each_engine(engine, gt, id) {
ret = intel_engine_resume(engine);
if (ret)
return ret;
}
return 0;
}
/**
* intel_gt_reset - reset chip after a hang
* @gt: #intel_gt to reset
* @stalled_mask: mask of the stalled engines with the guilty requests
* @reason: user error message for why we are resetting
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
*
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
* - re-init hardware status page
* - re-init ring buffer
* - re-init interrupt state
* - re-init display
*/
void intel_gt_reset(struct intel_gt *gt,
intel_engine_mask_t stalled_mask,
const char *reason)
{
intel_engine_mask_t awake;
int ret;
GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
/*
* FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
* critical section like gpu reset.
*/
gt_revoke(gt);
mutex_lock(&gt->reset.mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
if (!__intel_gt_unset_wedged(gt))
goto unlock;
if (reason)
drm_notice(&gt->i915->drm,
"Resetting chip for %s\n", reason);
atomic_inc(&gt->i915->gpu_error.reset_count);
awake = reset_prepare(gt);
if (!intel_has_gpu_reset(gt)) {
if (gt->i915->params.reset)
drm_err(&gt->i915->drm, "GPU reset not supported\n");
else
drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
goto error;
}
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
intel_runtime_pm_disable_interrupts(gt->i915);
if (do_reset(gt, stalled_mask)) {
drm_err(&gt->i915->drm, "Failed to reset chip\n");
goto taint;
}
if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
intel_runtime_pm_enable_interrupts(gt->i915);
intel_overlay_reset(gt->i915);
/*
* Next we need to restore the context, but we don't use those
* yet either...
*
* Ring buffer needs to be re-initialized in the KMS case, or if X
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
ret = intel_gt_init_hw(gt);
if (ret) {
drm_err(&gt->i915->drm,
"Failed to initialise HW following reset (%d)\n",
ret);
goto taint;
}
ret = resume(gt);
if (ret)
goto taint;
finish:
reset_finish(gt, awake);
unlock:
mutex_unlock(&gt->reset.mutex);
return;
taint:
/*
* History tells us that if we cannot reset the GPU now, we
* never will. This then impacts everything that is run
* subsequently. On failing the reset, we mark the driver
* as wedged, preventing further execution on the GPU.
* We also want to go one step further and add a taint to the
* kernel so that any subsequent faults can be traced back to
* this failure. This is important for CI, where if the
* GPU/driver fails we would like to reboot and restart testing
* rather than continue on into oblivion. For everyone else,
* the system should still plod along, but they have been warned!
*/
add_taint_for_CI(gt->i915, TAINT_WARN);
error:
__intel_gt_set_wedged(gt);
goto finish;
}
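/*
 * Rough ordering of the full reset above: revoke userspace mmaps, clear
 * any previous wedge, reset_prepare() the engines, perform the hardware
 * reset (with retries in do_reset()), re-init the hardware and resume the
 * engines, then reset_finish(); any failure wedges the device and adds a
 * TAINT_WARN for CI.
 */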
static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
return __intel_gt_reset(engine->gt, engine->mask);
}
int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
struct intel_gt *gt = engine->gt;
int ret;
ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
if (intel_engine_uses_guc(engine))
return -ENODEV;
if (!intel_engine_pm_get_if_awake(engine))
return 0;
reset_prepare_engine(engine);
if (msg)
drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
ret = intel_gt_reset_engine(engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
goto out;
}
/*
* The request that caused the hang is stuck on elsp, we know the
* active request and can drop it, adjust head to skip the offending
* request to resume executing remaining requests in the queue.
*/
__intel_engine_reset(engine, true);
/*
* The engine and its registers (and workarounds in case of render)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
ret = intel_engine_resume(engine);
out:
intel_engine_cancel_stop_cs(engine);
reset_finish_engine(engine);
intel_engine_pm_put_async(engine);
return ret;
}
/**
* intel_engine_reset - reset GPU engine to recover from a hang
* @engine: engine to reset
* @msg: reason for GPU reset; or NULL for no drm_notice()
*
* Reset a specific GPU engine. Useful if a hang is detected.
* Returns zero on successful reset or otherwise an error code.
*
* Procedure is:
* - identifies the request that caused the hang and it is dropped
* - reset engine (which will force the engine to idle)
* - re-init/configure engine
*/
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
int err;
local_bh_disable();
err = __intel_engine_reset_bh(engine, msg);
local_bh_enable();
return err;
}
static void intel_gt_reset_global(struct intel_gt *gt,
u32 engine_mask,
const char *reason)
{
struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
struct intel_wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
intel_wedge_on_timeout(&w, gt, 60 * HZ) {
intel_display_reset_prepare(gt->i915);
intel_gt_reset(gt, engine_mask, reason);
intel_display_reset_finish(gt->i915);
}
if (!test_bit(I915_WEDGED, &gt->reset.flags))
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
/**
* intel_gt_handle_error - handle a gpu error
* @gt: the intel_gt
* @engine_mask: mask representing engines that are hung
* @flags: control flags
* @fmt: Error message format string
*
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
void intel_gt_handle_error(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...)
{
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
intel_engine_mask_t tmp;
char error_msg[80];
char *msg = NULL;
if (fmt) {
va_list args;
va_start(args, fmt);
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
msg = error_msg;
}
/*
* In most cases it's guaranteed that we get here with an RPM
* reference held, for example because there is a pending GPU
* request that won't finish until the reset is done. This
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine_mask &= gt->info.engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
intel_gt_clear_error_registers(gt, engine_mask);
}
/*
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
if (!intel_uc_uses_guc_submission(&gt->uc) &&
intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
local_bh_disable();
for_each_engine_masked(engine, gt, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
continue;
if (__intel_engine_reset_bh(engine, msg) == 0)
engine_mask &= ~engine->mask;
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
}
local_bh_enable();
}
if (!engine_mask)
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
wait_event(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
goto out; /* piggy-back on the other reset */
}
/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
synchronize_rcu_expedited();
/*
* Prevent any other reset-engine attempt. We don't do this for GuC
* submission, since the GuC owns the per-engine reset, not the i915.
*/
if (!intel_uc_uses_guc_submission(&gt->uc)) {
for_each_engine(engine, gt, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
&gt->reset.flags))
wait_on_bit(&gt->reset.flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
}
/* Flush everyone using a resource about to be clobbered */
synchronize_srcu_expedited(&gt->reset.backoff_srcu);
intel_gt_reset_global(gt, engine_mask, msg);
if (!intel_uc_uses_guc_submission(&gt->uc)) {
for_each_engine(engine, gt, tmp)
clear_bit_unlock(I915_RESET_ENGINE + engine->id,
&gt->reset.flags);
}
clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
smp_mb__after_atomic();
wake_up_all(&gt->reset.queue);
out:
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry)
{
might_lock(&gt->reset.backoff_srcu);
if (retry)
might_sleep();
rcu_read_lock();
while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
rcu_read_unlock();
if (!retry)
return -EBUSY;
if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
return -EINTR;
rcu_read_lock();
}
*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
rcu_read_unlock();
return 0;
}
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
return _intel_gt_reset_lock(gt, srcu, false);
}
int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu)
{
return _intel_gt_reset_lock(gt, srcu, true);
}
void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
int intel_gt_terminally_wedged(struct intel_gt *gt)
{
might_sleep();
if (!intel_gt_is_wedged(gt))
return 0;
if (intel_gt_has_unrecoverable_error(gt))
return -EIO;
/* Reset still in progress? Maybe we will recover? */
if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
&gt->reset.flags)))
return -EINTR;
return intel_gt_is_wedged(gt) ? -EIO : 0;
}
void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
I915_WEDGED_ON_INIT);
intel_gt_set_wedged(gt);
i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
/* Wedged on init is non-recoverable */
add_taint_for_CI(gt->i915, TAINT_WARN);
}
void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
intel_gt_set_wedged(gt);
i915_disable_error_state(gt->i915, -ENODEV);
set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}
void intel_gt_init_reset(struct intel_gt *gt)
{
init_waitqueue_head(&gt->reset.queue);
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
/*
* While undesirable to wait inside the shrinker, complain anyway.
*
* If we have to wait during shrinking, we guarantee forward progress
* by forcing the reset. Therefore during the reset we must not
* re-enter the shrinker. By declaring that we take the reset mutex
* within the shrinker, we forbid ourselves from performing any
* fs-reclaim or taking related locks during reset.
*/
i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
/* no GPU until we are ready! */
__set_bit(I915_WEDGED, &gt->reset.flags);
}
void intel_gt_fini_reset(struct intel_gt *gt)
{
cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
static void intel_wedge_me(struct work_struct *work)
{
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
drm_err(&w->gt->i915->drm,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
intel_gt_set_wedged(w->gt);
}
void __intel_init_wedge(struct intel_wedge_me *w,
struct intel_gt *gt,
long timeout,
const char *name)
{
w->gt = gt;
w->name = name;
INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
}
void __intel_fini_wedge(struct intel_wedge_me *w)
{
cancel_delayed_work_sync(&w->work);
destroy_delayed_work_on_stack(&w->work);
w->gt = NULL;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_reset.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*
* Generated by: intel-gpu-tools-1.8-220-g01153e7
*/
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
0x00000798,
0x000007a4,
0x000007ac,
0x000007bc,
-1,
};
static const u32 gen8_null_state_batch[] = {
0x7a000004,
0x01000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x69040000,
0x78140000,
0x04000000,
0x7820000a,
0x00000000,
0x00000000,
0x80000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78130002,
0x00000000,
0x00000000,
0x02001808,
0x781f0002,
0x00000000,
0x00000000,
0x00000000,
0x78510009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100007,
0x00000000,
0x00000000,
0x00010000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000800,
0x00000000,
0x78110008,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781e0003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781d0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78500003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x780c0000,
0x00000000,
0x78520003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78300000,
0x08010040,
0x78310000,
0x1e000000,
0x78320000,
0x1e000000,
0x78330000,
0x1e000000,
0x79190002,
0x00000000,
0x00000000,
0x00000000,
0x791a0002,
0x00000000,
0x00000000,
0x00000000,
0x791b0002,
0x00000000,
0x00000000,
0x00000000,
0x79120000,
0x00000000,
0x79130000,
0x00000000,
0x79140000,
0x00000000,
0x79150000,
0x00000000,
0x79160000,
0x00000000,
0x78150009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78190009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781a0009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78170009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78490001,
0x00000000,
0x00000000,
0x784a0000,
0x00000000,
0x784b0000,
0x00000004,
0x79170101,
0x00000000,
0x00000080,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x20000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x40000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x60000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x6101000e,
0x00000001, /* reloc */
0x00000000,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00001001,
0x00001001,
0x00000001,
0x00001001,
0x61020001,
0x00000000,
0x00000000,
0x79000002,
0x00000000,
0x00000000,
0x00000000,
0x78050006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79040002,
0x00000000,
0x00000000,
0x00000000,
0x79040002,
0x40000000,
0x00000000,
0x00000000,
0x79040002,
0x80000000,
0x00000000,
0x00000000,
0x79040002,
0xc0000000,
0x00000000,
0x00000000,
0x79080001,
0x00000000,
0x00000000,
0x790a0001,
0x00000000,
0x00000000,
0x78060003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78070003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000000,
0x79110000,
0x00000000,
0x780d0000,
0x00000000,
0x79060000,
0x00000000,
0x7907001f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x7902000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x790c000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x780a0003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78080083,
0x00004000,
0x00000000,
0x00000000,
0x00000000,
0x04004000,
0x00000000,
0x00000000,
0x00000000,
0x08004000,
0x00000000,
0x00000000,
0x00000000,
0x0c004000,
0x00000000,
0x00000000,
0x00000000,
0x10004000,
0x00000000,
0x00000000,
0x00000000,
0x14004000,
0x00000000,
0x00000000,
0x00000000,
0x18004000,
0x00000000,
0x00000000,
0x00000000,
0x1c004000,
0x00000000,
0x00000000,
0x00000000,
0x20004000,
0x00000000,
0x00000000,
0x00000000,
0x24004000,
0x00000000,
0x00000000,
0x00000000,
0x28004000,
0x00000000,
0x00000000,
0x00000000,
0x2c004000,
0x00000000,
0x00000000,
0x00000000,
0x30004000,
0x00000000,
0x00000000,
0x00000000,
0x34004000,
0x00000000,
0x00000000,
0x00000000,
0x38004000,
0x00000000,
0x00000000,
0x00000000,
0x3c004000,
0x00000000,
0x00000000,
0x00000000,
0x40004000,
0x00000000,
0x00000000,
0x00000000,
0x44004000,
0x00000000,
0x00000000,
0x00000000,
0x48004000,
0x00000000,
0x00000000,
0x00000000,
0x4c004000,
0x00000000,
0x00000000,
0x00000000,
0x50004000,
0x00000000,
0x00000000,
0x00000000,
0x54004000,
0x00000000,
0x00000000,
0x00000000,
0x58004000,
0x00000000,
0x00000000,
0x00000000,
0x5c004000,
0x00000000,
0x00000000,
0x00000000,
0x60004000,
0x00000000,
0x00000000,
0x00000000,
0x64004000,
0x00000000,
0x00000000,
0x00000000,
0x68004000,
0x00000000,
0x00000000,
0x00000000,
0x6c004000,
0x00000000,
0x00000000,
0x00000000,
0x70004000,
0x00000000,
0x00000000,
0x00000000,
0x74004000,
0x00000000,
0x00000000,
0x00000000,
0x78004000,
0x00000000,
0x00000000,
0x00000000,
0x7c004000,
0x00000000,
0x00000000,
0x00000000,
0x80004000,
0x00000000,
0x00000000,
0x00000000,
0x78090043,
0x02000000,
0x22220000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x680b0001,
0x78260000,
0x00000000,
0x78270000,
0x00000000,
0x78280000,
0x00000000,
0x78290000,
0x00000000,
0x782a0000,
0x00000000,
0x780e0000,
0x00000dc1,
0x78240000,
0x00000e01,
0x784f0000,
0x80000100,
0x784d0000,
0x40000000,
0x782b0000,
0x00000000,
0x782c0000,
0x00000000,
0x782d0000,
0x00000000,
0x782e0000,
0x00000000,
0x782f0000,
0x00000000,
0x780f0000,
0x00000000,
0x78230000,
0x00000e60,
0x78210000,
0x00000e80,
0x7b000005,
0x00000004,
0x00000001,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state start */
0x00000000,
0x3f800000,
0x3f800000,
0x3f800000,
0x3f800000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
RO_RENDERSTATE(8);
| linux-master | drivers/gpu/drm/i915/gt/gen8_renderstate.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include <drm/drm_device.h>
#include <linux/sysfs.h>
#include <linux/printk.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
enum intel_gt_sysfs_op {
INTEL_GT_SYSFS_MIN = 0,
INTEL_GT_SYSFS_MAX,
};
static int
sysfs_gt_attribute_w_func(struct kobject *kobj, struct attribute *attr,
int (func)(struct intel_gt *gt, u32 val), u32 val)
{
struct intel_gt *gt;
int ret;
if (!is_object_gt(kobj)) {
int i;
struct device *dev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
for_each_gt(gt, i915, i) {
ret = func(gt, val);
if (ret)
break;
}
} else {
gt = intel_gt_sysfs_get_drvdata(kobj, attr->name);
ret = func(gt, val);
}
return ret;
}
static u32
sysfs_gt_attribute_r_func(struct kobject *kobj, struct attribute *attr,
u32 (func)(struct intel_gt *gt),
enum intel_gt_sysfs_op op)
{
struct intel_gt *gt;
u32 ret;
ret = (op == INTEL_GT_SYSFS_MAX) ? 0 : (u32) -1;
if (!is_object_gt(kobj)) {
int i;
struct device *dev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
for_each_gt(gt, i915, i) {
u32 val = func(gt);
switch (op) {
case INTEL_GT_SYSFS_MIN:
if (val < ret)
ret = val;
break;
case INTEL_GT_SYSFS_MAX:
if (val > ret)
ret = val;
break;
}
}
} else {
gt = intel_gt_sysfs_get_drvdata(kobj, attr->name);
ret = func(gt);
}
return ret;
}
/* RC6 interfaces will show the minimum RC6 residency value */
#define sysfs_gt_attribute_r_min_func(d, a, f) \
sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MIN)
/* Frequency interfaces will show the maximum frequency value */
#define sysfs_gt_attribute_r_max_func(d, a, f) \
sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MAX)
#define INTEL_GT_SYSFS_SHOW(_name, _attr_type) \
static ssize_t _name##_show_common(struct kobject *kobj, \
struct attribute *attr, char *buff) \
{ \
u32 val = sysfs_gt_attribute_r_##_attr_type##_func(kobj, attr, \
__##_name##_show); \
\
return sysfs_emit(buff, "%u\n", val); \
} \
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buff) \
{ \
return _name ##_show_common(kobj, &attr->attr, buff); \
} \
static ssize_t _name##_dev_show(struct device *dev, \
struct device_attribute *attr, char *buff) \
{ \
return _name##_show_common(&dev->kobj, &attr->attr, buff); \
}
#define INTEL_GT_SYSFS_STORE(_name, _func) \
static ssize_t _name##_store_common(struct kobject *kobj, \
struct attribute *attr, \
const char *buff, size_t count) \
{ \
int ret; \
u32 val; \
\
ret = kstrtou32(buff, 0, &val); \
if (ret) \
return ret; \
\
ret = sysfs_gt_attribute_w_func(kobj, attr, _func, val); \
\
return ret ?: count; \
} \
static ssize_t _name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, const char *buff, \
size_t count) \
{ \
return _name##_store_common(kobj, &attr->attr, buff, count); \
} \
static ssize_t _name##_dev_store(struct device *dev, \
struct device_attribute *attr, \
const char *buff, size_t count) \
{ \
return _name##_store_common(&dev->kobj, &attr->attr, buff, count); \
}
#define INTEL_GT_SYSFS_SHOW_MAX(_name) INTEL_GT_SYSFS_SHOW(_name, max)
#define INTEL_GT_SYSFS_SHOW_MIN(_name) INTEL_GT_SYSFS_SHOW(_name, min)
#define INTEL_GT_ATTR_RW(_name) \
static struct kobj_attribute attr_##_name = __ATTR_RW(_name)
#define INTEL_GT_ATTR_RO(_name) \
static struct kobj_attribute attr_##_name = __ATTR_RO(_name)
#define INTEL_GT_DUAL_ATTR_RW(_name) \
static struct device_attribute dev_attr_##_name = __ATTR(_name, 0644, \
_name##_dev_show, \
_name##_dev_store); \
INTEL_GT_ATTR_RW(_name)
#define INTEL_GT_DUAL_ATTR_RO(_name) \
static struct device_attribute dev_attr_##_name = __ATTR(_name, 0444, \
_name##_dev_show, \
NULL); \
INTEL_GT_ATTR_RO(_name)
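/*
 * Illustrative expansion: INTEL_GT_SYSFS_SHOW_MIN(rc6_residency_ms) plus
 * INTEL_GT_DUAL_ATTR_RO(rc6_residency_ms) generate
 * rc6_residency_ms_show_common()/_show()/_dev_show(), a kobj_attribute
 * (attr_rc6_residency_ms) for the per-gt directory and a device_attribute
 * (dev_attr_rc6_residency_ms) for the legacy device interface.
 */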
static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id)
{
intel_wakeref_t wakeref;
u64 res = 0;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
res = intel_rc6_residency_us(&gt->rc6, id);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
static u8 get_rc6_mask(struct intel_gt *gt)
{
u8 mask = 0;
if (HAS_RC6(gt->i915))
mask |= BIT(0);
if (HAS_RC6p(gt->i915))
mask |= BIT(1);
if (HAS_RC6pp(gt->i915))
mask |= BIT(2);
return mask;
}
static ssize_t rc6_enable_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
}
static ssize_t rc6_enable_dev_show(struct device *dev,
struct device_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(&dev->kobj, attr->attr.name);
return sysfs_emit(buff, "%x\n", get_rc6_mask(gt));
}
static u32 __rc6_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, INTEL_RC6_RES_RC6);
}
static u32 __rc6p_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, INTEL_RC6_RES_RC6p);
}
static u32 __rc6pp_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, INTEL_RC6_RES_RC6pp);
}
static u32 __media_rc6_residency_ms_show(struct intel_gt *gt)
{
return get_residency(gt, INTEL_RC6_RES_VLV_MEDIA);
}
INTEL_GT_SYSFS_SHOW_MIN(rc6_residency_ms);
INTEL_GT_SYSFS_SHOW_MIN(rc6p_residency_ms);
INTEL_GT_SYSFS_SHOW_MIN(rc6pp_residency_ms);
INTEL_GT_SYSFS_SHOW_MIN(media_rc6_residency_ms);
INTEL_GT_DUAL_ATTR_RO(rc6_enable);
INTEL_GT_DUAL_ATTR_RO(rc6_residency_ms);
INTEL_GT_DUAL_ATTR_RO(rc6p_residency_ms);
INTEL_GT_DUAL_ATTR_RO(rc6pp_residency_ms);
INTEL_GT_DUAL_ATTR_RO(media_rc6_residency_ms);
static struct attribute *rc6_attrs[] = {
&attr_rc6_enable.attr,
&attr_rc6_residency_ms.attr,
NULL
};
static struct attribute *rc6p_attrs[] = {
&attr_rc6p_residency_ms.attr,
&attr_rc6pp_residency_ms.attr,
NULL
};
static struct attribute *media_rc6_attrs[] = {
&attr_media_rc6_residency_ms.attr,
NULL
};
static struct attribute *rc6_dev_attrs[] = {
&dev_attr_rc6_enable.attr,
&dev_attr_rc6_residency_ms.attr,
NULL
};
static struct attribute *rc6p_dev_attrs[] = {
&dev_attr_rc6p_residency_ms.attr,
&dev_attr_rc6pp_residency_ms.attr,
NULL
};
static struct attribute *media_rc6_dev_attrs[] = {
&dev_attr_media_rc6_residency_ms.attr,
NULL
};
static const struct attribute_group rc6_attr_group[] = {
{ .attrs = rc6_attrs, },
{ .name = power_group_name, .attrs = rc6_dev_attrs, },
};
static const struct attribute_group rc6p_attr_group[] = {
{ .attrs = rc6p_attrs, },
{ .name = power_group_name, .attrs = rc6p_dev_attrs, },
};
static const struct attribute_group media_rc6_attr_group[] = {
{ .attrs = media_rc6_attrs, },
{ .name = power_group_name, .attrs = media_rc6_dev_attrs, },
};
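/*
 * Each *_attr_group[] pairs the per-gt kobject variant (index 0) with the
 * legacy device variant (index 1); the helper below creates the former
 * directly and merges the latter into the existing "power" group.
 */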
static int __intel_gt_sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return is_object_gt(kobj) ?
sysfs_create_group(kobj, &grp[0]) :
sysfs_merge_group(kobj, &grp[1]);
}
static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
{
int ret;
if (!IS_ENABLED(CONFIG_PM) || !HAS_RC6(gt->i915))
return;
ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
if (ret)
gt_warn(gt, "failed to create RC6 sysfs files (%pe)\n", ERR_PTR(ret));
/*
* cannot use the is_visible() attribute because
* the upper object inherits from the parent group.
*/
if (HAS_RC6p(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group);
if (ret)
gt_warn(gt, "failed to create RC6p sysfs files (%pe)\n", ERR_PTR(ret));
}
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group);
if (ret)
gt_warn(gt, "failed to create media RC6 sysfs files (%pe)\n", ERR_PTR(ret));
}
}
static u32 __act_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_read_actual_frequency(&gt->rps);
}
static u32 __cur_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_requested_frequency(&gt->rps);
}
static u32 __boost_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_boost_frequency(&gt->rps);
}
static int __boost_freq_mhz_store(struct intel_gt *gt, u32 val)
{
return intel_rps_set_boost_frequency(&gt->rps, val);
}
static u32 __RP0_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_rp0_frequency(&gt->rps);
}
static u32 __RPn_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_rpn_frequency(&gt->rps);
}
static u32 __RP1_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_rp1_frequency(&gt->rps);
}
static u32 __max_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_max_frequency(&gt->rps);
}
static int __set_max_freq(struct intel_gt *gt, u32 val)
{
return intel_rps_set_max_frequency(&gt->rps, val);
}
static u32 __min_freq_mhz_show(struct intel_gt *gt)
{
return intel_rps_get_min_frequency(&gt->rps);
}
static int __set_min_freq(struct intel_gt *gt, u32 val)
{
return intel_rps_set_min_frequency(&gt->rps, val);
}
static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt)
{
struct intel_rps *rps = &gt->rps;
return intel_gpu_freq(rps, rps->efficient_freq);
}
INTEL_GT_SYSFS_SHOW_MAX(act_freq_mhz);
INTEL_GT_SYSFS_SHOW_MAX(boost_freq_mhz);
INTEL_GT_SYSFS_SHOW_MAX(cur_freq_mhz);
INTEL_GT_SYSFS_SHOW_MAX(RP0_freq_mhz);
INTEL_GT_SYSFS_SHOW_MAX(RP1_freq_mhz);
INTEL_GT_SYSFS_SHOW_MAX(RPn_freq_mhz);
INTEL_GT_SYSFS_SHOW_MAX(max_freq_mhz);
INTEL_GT_SYSFS_SHOW_MIN(min_freq_mhz);
INTEL_GT_SYSFS_SHOW_MAX(vlv_rpe_freq_mhz);
INTEL_GT_SYSFS_STORE(boost_freq_mhz, __boost_freq_mhz_store);
INTEL_GT_SYSFS_STORE(max_freq_mhz, __set_max_freq);
INTEL_GT_SYSFS_STORE(min_freq_mhz, __set_min_freq);
#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store, _show_dev, _store_dev) \
static struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode, \
_show_dev, _store_dev); \
static struct kobj_attribute attr_rps_##_name = __ATTR(rps_##_name, _mode, \
_show, _store)
#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name) \
INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL, \
_name##_dev_show, NULL)
#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name) \
INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store, \
_name##_dev_show, _name##_dev_store)
/* The below macros generate static structures */
INTEL_GT_RPS_SYSFS_ATTR_RO(act_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(cur_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RW(boost_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(RP0_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(RP1_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(RPn_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RW(max_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RW(min_freq_mhz);
INTEL_GT_RPS_SYSFS_ATTR_RO(vlv_rpe_freq_mhz);
#define GEN6_ATTR(p, s) { \
&p##attr_##s##_act_freq_mhz.attr, \
&p##attr_##s##_cur_freq_mhz.attr, \
&p##attr_##s##_boost_freq_mhz.attr, \
&p##attr_##s##_max_freq_mhz.attr, \
&p##attr_##s##_min_freq_mhz.attr, \
&p##attr_##s##_RP0_freq_mhz.attr, \
&p##attr_##s##_RP1_freq_mhz.attr, \
&p##attr_##s##_RPn_freq_mhz.attr, \
NULL, \
}
#define GEN6_RPS_ATTR GEN6_ATTR(, rps)
#define GEN6_GT_ATTR GEN6_ATTR(dev_, gt)
static const struct attribute * const gen6_rps_attrs[] = GEN6_RPS_ATTR;
static const struct attribute * const gen6_gt_attrs[] = GEN6_GT_ATTR;
static ssize_t punit_req_freq_mhz_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
u32 preq = intel_rps_read_punit_req_frequency(&gt->rps);
return sysfs_emit(buff, "%u\n", preq);
}
static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
}
static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
int err;
u32 val;
err = kstrtou32(buff, 0, &val);
if (err)
return err;
err = intel_guc_slpc_set_ignore_eff_freq(slpc, val);
return err ?: count;
}
struct intel_gt_bool_throttle_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
char *buf);
i915_reg_t (*reg32)(struct intel_gt *gt);
u32 mask;
};
static ssize_t throttle_reason_bool_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_gt_bool_throttle_attr *t_attr =
(struct intel_gt_bool_throttle_attr *) attr;
bool val = rps_read_mask_mmio(&gt->rps, t_attr->reg32(gt), t_attr->mask);
return sysfs_emit(buff, "%u\n", val);
}
#define INTEL_GT_RPS_BOOL_ATTR_RO(sysfs_func__, mask__) \
struct intel_gt_bool_throttle_attr attr_##sysfs_func__ = { \
.attr = { .name = __stringify(sysfs_func__), .mode = 0444 }, \
.show = throttle_reason_bool_show, \
.reg32 = intel_gt_perf_limit_reasons_reg, \
.mask = mask__, \
}
INTEL_GT_ATTR_RO(punit_req_freq_mhz);
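/*
 * The throttle_reason_* attributes below each expose one field of the
 * perf-limit-reasons register as a 0/1 value via throttle_reason_bool_show().
 */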
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_status, GT0_PERF_LIMIT_REASONS_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl1, POWER_LIMIT_1_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl2, POWER_LIMIT_2_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl4, POWER_LIMIT_4_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_thermal, THERMAL_LIMIT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_prochot, PROCHOT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
static const struct attribute *throttle_reason_attrs[] = {
&attr_throttle_reason_status.attr,
&attr_throttle_reason_pl1.attr,
&attr_throttle_reason_pl2.attr,
&attr_throttle_reason_pl4.attr,
&attr_throttle_reason_thermal.attr,
&attr_throttle_reason_prochot.attr,
&attr_throttle_reason_ratl.attr,
&attr_throttle_reason_vr_thermalert.attr,
&attr_throttle_reason_vr_tdc.attr,
NULL
};
/*
* Scaling for multipliers (aka frequency factors).
* The format of the value in the register is u8.8.
*
* The presentation to userspace is inspired by the perf event framework.
* See:
* Documentation/ABI/testing/sysfs-bus-event_source-devices-events
* for description of:
* /sys/bus/event_source/devices/<pmu>/events/<event>.scale
*
* Summary: Expose two sysfs files for each multiplier.
*
* 1. File <attr> contains a raw hardware value.
* 2. File <attr>.scale contains the multiplicative scale factor to be
* used by userspace to compute the actual value.
*
* So userspace knows that to get the frequency_factor it multiplies the
* provided value by the specified scale factor and vice-versa.
*
* That way there is no precision loss in the kernel interface and API
* is future proof should one day the hardware register change to u16.u16,
* on some platform. (Or any other fixed point representation.)
*
* Example:
* File <attr> contains the value 2.5, represented as u8.8 0x0280, which
* is comprised of:
* - an integer part of 2
* - a fractional part of 0x80 (representing 0x80 / 2^8 == 0x80 / 256).
* File <attr>.scale contains a string representation of floating point
* value 0.00390625 (which is (1 / 256)).
* Userspace computes the actual value:
* 0x0280 * 0.00390625 -> 2.5
* or converts an actual value to the value to be written into <attr>:
* 2.5 / 0.00390625 -> 0x0280
*/
#define U8_8_VAL_MASK 0xffff
#define U8_8_SCALE_TO_VALUE "0.00390625"
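/*
 * Purely illustrative sketch of the u8.8 decode described above (not code
 * the driver uses): with raw = reg & U8_8_VAL_MASK, the integer part is
 * raw >> 8 and the fractional part is (raw & 0xff) / 256.0 - i.e. exactly
 * what multiplying raw by the exported 0.00390625 scale yields.
 */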
static ssize_t freq_factor_scale_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
return sysfs_emit(buff, "%s\n", U8_8_SCALE_TO_VALUE);
}
static u32 media_ratio_mode_to_factor(u32 mode)
{
/* 0 -> 0, 1 -> 256, 2 -> 128 */
return !mode ? mode : 256 / mode;
}
static ssize_t media_freq_factor_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
intel_wakeref_t wakeref;
u32 mode;
/*
* Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by
* GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1
*/
if (IS_XEHPSDV(gt->i915) &&
slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) {
/*
* For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain
* the media_ratio_mode, just return the cached media ratio
*/
mode = slpc->media_ratio_mode;
} else {
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ);
mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ?
SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE :
SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO;
}
return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode));
}
static ssize_t media_freq_factor_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buff, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
u32 factor, mode;
int err;
err = kstrtou32(buff, 0, &factor);
if (err)
return err;
for (mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
mode <= SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; mode++)
if (factor == media_ratio_mode_to_factor(mode))
break;
if (mode > SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO)
return -EINVAL;
err = intel_guc_slpc_set_media_ratio_mode(slpc, mode);
if (!err) {
slpc->media_ratio_mode = mode;
DRM_DEBUG("Set slpc->media_ratio_mode to %d", mode);
}
return err ?: count;
}
static ssize_t media_RP0_freq_mhz_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
u32 val;
int err;
err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
PCODE_MBOX_FC_SC_READ_FUSED_P0,
PCODE_MBOX_DOMAIN_MEDIAFF, &val);
if (err)
return err;
/* Fused media RP0 read from pcode is in units of 50 MHz */
val *= GT_FREQUENCY_MULTIPLIER;
return sysfs_emit(buff, "%u\n", val);
}
static ssize_t media_RPn_freq_mhz_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buff)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
u32 val;
int err;
err = snb_pcode_read_p(gt->uncore, XEHP_PCODE_FREQUENCY_CONFIG,
PCODE_MBOX_FC_SC_READ_FUSED_PN,
PCODE_MBOX_DOMAIN_MEDIAFF, &val);
if (err)
return err;
/* Fused media RPn read from pcode is in units of 50 MHz */
val *= GT_FREQUENCY_MULTIPLIER;
return sysfs_emit(buff, "%u\n", val);
}
INTEL_GT_ATTR_RW(media_freq_factor);
static struct kobj_attribute attr_media_freq_factor_scale =
__ATTR(media_freq_factor.scale, 0444, freq_factor_scale_show, NULL);
INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
INTEL_GT_ATTR_RW(slpc_ignore_eff_freq);
static const struct attribute *media_perf_power_attrs[] = {
&attr_media_freq_factor.attr,
&attr_media_freq_factor_scale.attr,
&attr_media_RP0_freq_mhz.attr,
&attr_media_RPn_freq_mhz.attr,
NULL
};
static ssize_t
rps_up_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%u\n", intel_rps_get_up_threshold(rps));
}
static ssize_t
rps_up_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
int ret;
u8 val;
ret = kstrtou8(buf, 10, &val);
if (ret)
return ret;
ret = intel_rps_set_up_threshold(rps, val);
return ret == 0 ? count : ret;
}
static struct kobj_attribute rps_up_threshold_pct =
__ATTR(rps_up_threshold_pct,
0664,
rps_up_threshold_pct_show,
rps_up_threshold_pct_store);
static ssize_t
rps_down_threshold_pct_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
return sysfs_emit(buf, "%u\n", intel_rps_get_down_threshold(rps));
}
static ssize_t
rps_down_threshold_pct_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
struct intel_rps *rps = &gt->rps;
int ret;
u8 val;
ret = kstrtou8(buf, 10, &val);
if (ret)
return ret;
ret = intel_rps_set_down_threshold(rps, val);
return ret == 0 ? count : ret;
}
static struct kobj_attribute rps_down_threshold_pct =
__ATTR(rps_down_threshold_pct,
0664,
rps_down_threshold_pct_show,
rps_down_threshold_pct_store);
static const struct attribute * const gen6_gt_rps_attrs[] = {
&rps_up_threshold_pct.attr,
&rps_down_threshold_pct.attr,
NULL
};
static ssize_t
default_min_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_gt *gt = kobj_to_gt(kobj->parent);
return sysfs_emit(buf, "%u\n", gt->defaults.min_freq);
}
static struct kobj_attribute default_min_freq_mhz =
__ATTR(rps_min_freq_mhz, 0444, default_min_freq_mhz_show, NULL);
static ssize_t
default_max_freq_mhz_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct intel_gt *gt = kobj_to_gt(kobj->parent);
return sysfs_emit(buf, "%u\n", gt->defaults.max_freq);
}
static struct kobj_attribute default_max_freq_mhz =
__ATTR(rps_max_freq_mhz, 0444, default_max_freq_mhz_show, NULL);
static ssize_t
default_rps_up_threshold_pct_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = kobj_to_gt(kobj->parent);
return sysfs_emit(buf, "%u\n", gt->defaults.rps_up_threshold);
}
static struct kobj_attribute default_rps_up_threshold_pct =
__ATTR(rps_up_threshold_pct, 0444, default_rps_up_threshold_pct_show, NULL);
static ssize_t
default_rps_down_threshold_pct_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct intel_gt *gt = kobj_to_gt(kobj->parent);
return sysfs_emit(buf, "%u\n", gt->defaults.rps_down_threshold);
}
static struct kobj_attribute default_rps_down_threshold_pct =
__ATTR(rps_down_threshold_pct, 0444, default_rps_down_threshold_pct_show, NULL);
static const struct attribute * const rps_defaults_attrs[] = {
&default_min_freq_mhz.attr,
&default_max_freq_mhz.attr,
&default_rps_up_threshold_pct.attr,
&default_rps_down_threshold_pct.attr,
NULL
};
static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj)
{
const struct attribute * const *attrs;
struct attribute *vlv_attr;
int ret;
if (GRAPHICS_VER(gt->i915) < 6)
return 0;
if (is_object_gt(kobj)) {
attrs = gen6_rps_attrs;
vlv_attr = &attr_rps_vlv_rpe_freq_mhz.attr;
} else {
attrs = gen6_gt_attrs;
vlv_attr = &dev_attr_gt_vlv_rpe_freq_mhz.attr;
}
ret = sysfs_create_files(kobj, attrs);
if (ret)
return ret;
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
ret = sysfs_create_file(kobj, vlv_attr);
if (is_object_gt(kobj) && !intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, gen6_gt_rps_attrs);
if (ret)
return ret;
}
return ret;
}
void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
{
int ret;
intel_sysfs_rc6_init(gt, kobj);
ret = intel_sysfs_rps_init(gt, kobj);
if (ret)
gt_warn(gt, "failed to create RPS sysfs files (%pe)", ERR_PTR(ret));
/* end of the legacy interfaces */
if (!is_object_gt(kobj))
return;
ret = sysfs_create_file(kobj, &attr_punit_req_freq_mhz.attr);
if (ret)
gt_warn(gt, "failed to create punit_req_freq_mhz sysfs (%pe)", ERR_PTR(ret));
if (intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_file(kobj, &attr_slpc_ignore_eff_freq.attr);
if (ret)
gt_warn(gt, "failed to create ignore_eff_freq sysfs (%pe)", ERR_PTR(ret));
}
if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
ret = sysfs_create_files(kobj, throttle_reason_attrs);
if (ret)
gt_warn(gt, "failed to create throttle sysfs files (%pe)", ERR_PTR(ret));
}
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
gt_warn(gt, "failed to create media_perf_power_attrs sysfs (%pe)\n",
ERR_PTR(ret));
}
ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
if (ret)
gt_warn(gt, "failed to add rps defaults (%pe)\n", ERR_PTR(ret));
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <drm/drm_managed.h>
#include "i915_drv.h"
#include "gt/intel_gt.h"
#include "gt/intel_sa_media.h"
int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
u32 gsi_offset)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore;
uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL);
if (!uncore)
return -ENOMEM;
uncore->gsi_offset = gsi_offset;
gt->irq_lock = to_gt(i915)->irq_lock;
intel_gt_common_init_early(gt);
intel_uncore_init_early(uncore, gt);
/*
* Standalone media shares the general MMIO space with the primary
* GT. We'll re-use the primary GT's mapping.
*/
uncore->regs = intel_uncore_regs(&i915->uncore);
if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
return -EIO;
gt->uncore = uncore;
gt->phys_addr = phys_addr;
/*
* For current platforms we can assume there's only a single
* media GT and cache it for quick lookup.
*/
drm_WARN_ON(&i915->drm, i915->media_gt);
i915->media_gt = gt;
return 0;
}
| linux-master | drivers/gpu/drm/i915/gt/intel_sa_media.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*
* Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
*/
static const u32 ivb_clear_kernel[] = {
0x00000001, 0x26020128, 0x00000024, 0x00000000,
0x00000040, 0x20280c21, 0x00000028, 0x00000001,
0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
0x00600001, 0x20600061, 0x00000000, 0x00000000,
0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
0x00000041, 0x207424a5, 0x00000064, 0x00000034,
0x00000040, 0x206014a5, 0x00000060, 0x00000074,
0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
0x00000040, 0x206814a5, 0x00000068, 0x00000070,
0x00600001, 0x20a00061, 0x00000000, 0x00000000,
0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
0x00600001, 0x20800021, 0x008d0000, 0x00000000,
0x00000001, 0x20800021, 0x0000006c, 0x00000000,
0x00000001, 0x20840021, 0x00000068, 0x00000000,
0x00000001, 0x20880061, 0x00000000, 0x00000003,
0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
0x00000001, 0x26020128, 0x00000024, 0x00000000,
0x00000001, 0x220010e4, 0x00000000, 0x00000000,
0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
0x00600001, 0x20400021, 0x008d0000, 0x00000000,
0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
0x00200001, 0x20400121, 0x00450020, 0x00000000,
0x00000001, 0x20480061, 0x00000000, 0x000f000f,
0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
0x00800001, 0x20600061, 0x00000000, 0x00000000,
0x00800001, 0x20800061, 0x00000000, 0x00000000,
0x00800001, 0x20a00061, 0x00000000, 0x00000000,
0x00800001, 0x20c00061, 0x00000000, 0x00000000,
0x00800001, 0x20e00061, 0x00000000, 0x00000000,
0x00800001, 0x21000061, 0x00000000, 0x00000000,
0x00800001, 0x21200061, 0x00000000, 0x00000000,
0x00800001, 0x21400061, 0x00000000, 0x00000000,
0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
0x00000040, 0x20402d21, 0x00000020, 0x00100010,
0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
0x00800001, 0xa0000109, 0x00000602, 0x00000000,
0x00000040, 0x22001c84, 0x00000200, 0x00000020,
0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
};
| linux-master | drivers/gpu/drm/i915/gt/ivb_clear_kernel.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/log2.h>
#include "gem/i915_gem_internal.h"
#include "gen6_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt_regs.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
const unsigned int pde,
const struct i915_page_table *pt)
{
dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
/* Caller needs to make sure the write completes if necessary */
iowrite32(GEN6_PDE_ADDR_ENCODE(addr) | GEN6_PDE_VALID,
ppgtt->pd_addr + pde);
}
void gen7_ppgtt_enable(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
u32 ecochk;
intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
if (IS_HASWELL(i915)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
}
void gen6_ppgtt_enable(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
intel_uncore_rmw(uncore,
GAC_ECO_BITS,
0,
ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
intel_uncore_rmw(uncore,
GAB_CTL,
0,
GAB_CTL_CONT_AFTER_PAGEFAULT);
intel_uncore_rmw(uncore,
GAM_ECOCHK,
0,
ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
intel_uncore_write(uncore,
GFX_MODE,
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
while (num_entries) {
struct i915_page_table * const pt =
i915_pt_entry(ppgtt->base.pd, pde++);
const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
num_entries -= count;
GEM_BUG_ON(count > atomic_read(&pt->used));
if (!atomic_sub_return(count, &pt->used))
ppgtt->scan_for_unused_pt = true;
/*
* Note that the hw doesn't support removing PDE on the fly
* (they are cached inside the context with no means to
* invalidate the cache), so we can only reset the PTE
* entries back to scratch.
*/
vaddr = px_vaddr(pt);
memset32(vaddr + pte, scratch_pte, count);
pte = 0;
}
}
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 flags)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory * const pd = ppgtt->pd;
unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
unsigned int act_pt = first_entry / GEN6_PTES;
unsigned int act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, pat_index, flags);
struct sgt_dma iter = sgt_dma(vma_res);
gen6_pte_t *vaddr;
GEM_BUG_ON(!pd->entry[act_pt]);
vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
do {
GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
iter.dma += I915_GTT_PAGE_SIZE;
if (iter.dma == iter.max) {
iter.sg = __sg_next(iter.sg);
if (!iter.sg || sg_dma_len(iter.sg) == 0)
break;
iter.dma = sg_dma_address(iter.sg);
iter.max = iter.dma + sg_dma_len(iter.sg);
}
if (++act_pte == GEN6_PTES) {
vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
act_pte = 0;
}
} while (1);
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
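/*
 * Rewrite the PDEs covering [start, end) into the GGTT-mapped page directory
 * and invalidate the GGTT so the hardware picks up the updated page tables.
 */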
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
{
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
unsigned int pde;
start = round_down(start, SZ_64K);
end = round_up(end, SZ_64K) - start;
mutex_lock(&ppgtt->flush);
gen6_for_each_pde(pt, pd, start, end, pde)
gen6_write_pde(ppgtt, pde, pt);
mb();
ioread32(ppgtt->pd_addr + pde - 1);
gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
mb();
mutex_unlock(&ppgtt->flush);
}
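/*
 * Populate page tables for [start, start + length): missing PDEs are filled
 * from the preallocated stash under pd->lock, and the GGTT-visible page
 * directory is flushed afterwards if it is currently bound.
 */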
static void gen6_alloc_va_range(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
u64 start, u64 length)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
bool flush = false;
u64 from = start;
unsigned int pde;
spin_lock(&pd->lock);
gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
if (!pt) {
spin_unlock(&pd->lock);
pt = stash->pt[0];
__i915_gem_object_pin_pages(pt->base);
fill32_px(pt, vm->scratch[0]->encode);
spin_lock(&pd->lock);
if (!pd->entry[pde]) {
stash->pt[0] = pt->stash;
atomic_set(&pt->used, 0);
pd->entry[pde] = pt;
} else {
pt = pd->entry[pde];
}
flush = true;
}
atomic_add(count, &pt->used);
}
spin_unlock(&pd->lock);
if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
intel_wakeref_t wakeref;
with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
gen6_flush_pd(ppgtt, from, start);
}
}
static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
struct i915_address_space * const vm = &ppgtt->base.vm;
int ret;
ret = setup_scratch_page(vm);
if (ret)
return ret;
vm->scratch[0]->encode =
vm->pte_encode(px_dma(vm->scratch[0]),
i915_gem_get_pat_index(vm->i915,
I915_CACHE_NONE),
PTE_READ_ONLY);
vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
if (IS_ERR(vm->scratch[1])) {
ret = PTR_ERR(vm->scratch[1]);
goto err_scratch0;
}
ret = map_pt_dma(vm, vm->scratch[1]);
if (ret)
goto err_scratch1;
fill32_px(vm->scratch[1], vm->scratch[0]->encode);
return 0;
err_scratch1:
i915_gem_object_put(vm->scratch[1]);
err_scratch0:
i915_gem_object_put(vm->scratch[0]);
vm->scratch[0] = NULL;
return ret;
}
static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
u32 pde;
gen6_for_all_pdes(pt, pd, pde)
if (pt)
free_pt(&ppgtt->base.vm, pt);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
if (ppgtt->base.pd)
free_pd(&ppgtt->base.vm, ppgtt->base.pd);
mutex_destroy(&ppgtt->flush);
}
static void pd_vma_bind(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct gen6_ppgtt *ppgtt = vma_res->private;
u32 ggtt_offset = vma_res->start / I915_GTT_PAGE_SIZE;
ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10;
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
}
static void pd_vma_unbind(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
struct gen6_ppgtt *ppgtt = vma_res->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt;
unsigned int pde;
if (!ppgtt->scan_for_unused_pt)
return;
/* Free all no longer used page tables */
gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
if (!pt || atomic_read(&pt->used))
continue;
free_pt(&ppgtt->base.vm, pt);
pd->entry[pde] = NULL;
}
ppgtt->scan_for_unused_pt = false;
}
static const struct i915_vma_ops pd_vma_ops = {
.bind_vma = pd_vma_bind,
.unbind_vma = pd_vma_unbind,
};
int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref));
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* which will be pinned into every active context.
* (When vma->pin_count becomes atomic, I expect we will naturally
* need a larger, unpacked, type and kill this redundancy.)
*/
if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
return 0;
/* grab the ppgtt resv to pin the object */
err = i915_vm_lock_objects(&ppgtt->base.vm, ww);
if (err)
return err;
/*
* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
if (!atomic_read(&ppgtt->pin_count)) {
err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);
GEM_BUG_ON(ppgtt->vma->fence);
clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma));
}
if (!err)
atomic_inc(&ppgtt->pin_count);
return err;
}
static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj)
{
obj->mm.pages = ZERO_SIZE_PTR;
return 0;
}
static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
}
static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = {
.name = "pd_dummy_obj",
.get_pages = pd_dummy_obj_get_pages,
.put_pages = pd_dummy_obj_put_pages,
};
static struct i915_page_directory *
gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt)
{
struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
struct i915_page_directory *pd;
int err;
pd = __alloc_pd(I915_PDES);
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
&pd_dummy_obj_ops,
I915_PDES * SZ_4K);
if (IS_ERR(pd->pt.base)) {
err = PTR_ERR(pd->pt.base);
pd->pt.base = NULL;
goto err_pd;
}
pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
pd->pt.base->shares_resv_from = &ppgtt->base.vm;
ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
if (IS_ERR(ppgtt->vma)) {
err = PTR_ERR(ppgtt->vma);
ppgtt->vma = NULL;
goto err_pd;
}
/* The dummy object we create is special, override ops.. */
ppgtt->vma->ops = &pd_vma_ops;
ppgtt->vma->private = ppgtt;
return pd;
err_pd:
free_pd(&ppgtt->base.vm, pd);
return ERR_PTR(err);
}
void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
if (atomic_dec_and_test(&ppgtt->pin_count))
i915_vma_unpin(ppgtt->vma);
}
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
{
struct i915_ggtt * const ggtt = gt->ggtt;
struct gen6_ppgtt *ppgtt;
int err;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ERR_PTR(-ENOMEM);
mutex_init(&ppgtt->flush);
ppgtt_init(&ppgtt->base, gt, 0);
ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
ppgtt->base.vm.top = 1;
ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
goto err_put;
ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
if (IS_ERR(ppgtt->base.pd)) {
err = PTR_ERR(ppgtt->base.pd);
goto err_put;
}
return &ppgtt->base;
err_put:
i915_vm_put(&ppgtt->base.vm);
return ERR_PTR(err);
}
| linux-master | drivers/gpu/drm/i915/gt/gen6_ppgtt.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_gt_regs.h"
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
#include "vlv_sideband.h"
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
intel_gt_pm_get(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
}
void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
intel_gt_pm_put(gt);
atomic_dec(&gt->user_wakeref);
}
static int forcewake_user_open(struct inode *inode, struct file *file)
{
struct intel_gt *gt = inode->i_private;
intel_gt_pm_debugfs_forcewake_user_open(gt);
return 0;
}
static int forcewake_user_release(struct inode *inode, struct file *file)
{
struct intel_gt *gt = inode->i_private;
intel_gt_pm_debugfs_forcewake_user_release(gt);
return 0;
}
static const struct file_operations forcewake_user_fops = {
.owner = THIS_MODULE,
.open = forcewake_user_open,
.release = forcewake_user_release,
};
static int fw_domains_show(struct seq_file *m, void *data)
{
struct intel_gt *gt = m->private;
struct intel_uncore *uncore = gt->uncore;
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
uncore->user_forcewake_count);
for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
READ_ONCE(fw_domain->wake_count));
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
static int vlv_drpc(struct seq_file *m)
{
struct intel_gt *gt = m->private;
struct intel_uncore *uncore = gt->uncore;
u32 rcctl1, pw_status, mt_fwake_req;
mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
seq_printf(m, "RC6 Enabled: %s\n",
str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_EI_MODE(1))));
seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
seq_printf(m, "Render Power Well: %s\n",
(pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
(pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
intel_rc6_print_residency(m, "Render RC6 residency since boot:", INTEL_RC6_RES_RC6);
intel_rc6_print_residency(m, "Media RC6 residency since boot:", INTEL_RC6_RES_VLV_MEDIA);
return fw_domains_show(m, NULL);
}
static int gen6_drpc(struct seq_file *m)
{
struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
if (GRAPHICS_VER(i915) >= 9) {
gen9_powergate_enable =
intel_uncore_read(uncore, GEN9_PG_ENABLE);
gen9_powergate_status =
intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
}
if (GRAPHICS_VER(i915) <= 7)
snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
seq_printf(m, "RC6 Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Well Gating Enabled: %s\n",
str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
seq_printf(m, "Media Well Gating Enabled: %s\n",
str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
}
seq_printf(m, "Deep RC6 Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
seq_printf(m, "Deepest RC6 Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
seq_puts(m, "Current RC state: ");
switch (gt_core_status & GEN6_RCn_MASK) {
case GEN6_RC0:
if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
seq_puts(m, "Core Power Down\n");
else
seq_puts(m, "on\n");
break;
case GEN6_RC3:
seq_puts(m, "RC3\n");
break;
case GEN6_RC6:
seq_puts(m, "RC6\n");
break;
case GEN6_RC7:
seq_puts(m, "RC7\n");
break;
default:
seq_puts(m, "Unknown\n");
break;
}
seq_printf(m, "Core Power Down: %s\n",
str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
if (GRAPHICS_VER(i915) >= 9) {
seq_printf(m, "Render Power Well: %s\n",
(gen9_powergate_status &
GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
seq_printf(m, "Media Power Well: %s\n",
(gen9_powergate_status &
GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
}
/* Not exactly sure what this is */
intel_rc6_print_residency(m, "RC6 \"Locked to RPn\" residency since boot:",
INTEL_RC6_RES_RC6_LOCKED);
intel_rc6_print_residency(m, "RC6 residency since boot:", INTEL_RC6_RES_RC6);
intel_rc6_print_residency(m, "RC6+ residency since boot:", INTEL_RC6_RES_RC6p);
intel_rc6_print_residency(m, "RC6++ residency since boot:", INTEL_RC6_RES_RC6pp);
if (GRAPHICS_VER(i915) <= 7) {
seq_printf(m, "RC6 voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
seq_printf(m, "RC6+ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
seq_printf(m, "RC6++ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
}
return fw_domains_show(m, NULL);
}
static int ilk_drpc(struct seq_file *m)
{
struct intel_gt *gt = m->private;
struct intel_uncore *uncore = gt->uncore;
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
seq_printf(m, "HD boost: %s\n",
str_yes_no(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
(rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
MEMMODE_BOOST_FREQ_SHIFT);
seq_printf(m, "HW control enabled: %s\n",
str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN));
seq_printf(m, "SW control enabled: %s\n",
str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN));
seq_printf(m, "Gated voltage change: %s\n",
str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE));
seq_printf(m, "Starting frequency: P%d\n",
(rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
seq_printf(m, "Max P-state: P%d\n",
(rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
seq_printf(m, "Render standby enabled: %s\n",
str_yes_no(!(rstdbyctl & RCX_SW_EXIT)));
seq_puts(m, "Current RS state: ");
switch (rstdbyctl & RSX_STATUS_MASK) {
case RSX_STATUS_ON:
seq_puts(m, "on\n");
break;
case RSX_STATUS_RC1:
seq_puts(m, "RC1\n");
break;
case RSX_STATUS_RC1E:
seq_puts(m, "RC1E\n");
break;
case RSX_STATUS_RS1:
seq_puts(m, "RS1\n");
break;
case RSX_STATUS_RS2:
seq_puts(m, "RS2 (RC6)\n");
break;
case RSX_STATUS_RS3:
seq_puts(m, "RC3 (RC6+)\n");
break;
default:
seq_puts(m, "unknown\n");
break;
}
return 0;
}
static int mtl_drpc(struct seq_file *m)
{
struct intel_gt *gt = m->private;
struct intel_uncore *uncore = gt->uncore;
u32 gt_core_status, rcctl1, mt_fwake_req;
u32 mtl_powergate_enable = 0, mtl_powergate_status = 0;
mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
gt_core_status = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
mtl_powergate_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
mtl_powergate_status = intel_uncore_read(uncore,
GEN9_PWRGT_DOMAIN_STATUS);
seq_printf(m, "RC6 Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
if (gt->type == GT_MEDIA) {
seq_printf(m, "Media Well Gating Enabled: %s\n",
str_yes_no(mtl_powergate_enable & GEN9_MEDIA_PG_ENABLE));
} else {
seq_printf(m, "Render Well Gating Enabled: %s\n",
str_yes_no(mtl_powergate_enable & GEN9_RENDER_PG_ENABLE));
}
seq_puts(m, "Current RC state: ");
switch (REG_FIELD_GET(MTL_CC_MASK, gt_core_status)) {
case MTL_CC0:
seq_puts(m, "RC0\n");
break;
case MTL_CC6:
seq_puts(m, "RC6\n");
break;
default:
MISSING_CASE(REG_FIELD_GET(MTL_CC_MASK, gt_core_status));
seq_puts(m, "Unknown\n");
break;
}
seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
if (gt->type == GT_MEDIA)
seq_printf(m, "Media Power Well: %s\n",
(mtl_powergate_status &
GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
else
seq_printf(m, "Render Power Well: %s\n",
(mtl_powergate_status &
GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
/* Works for both render and media gt's */
intel_rc6_print_residency(m, "RC6 residency since boot:", INTEL_RC6_RES_RC6);
return fw_domains_show(m, NULL);
}
static int drpc_show(struct seq_file *m, void *unused)
{
struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
intel_wakeref_t wakeref;
int err = -ENODEV;
with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
err = mtl_drpc(m);
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_drpc(m);
else if (GRAPHICS_VER(i915) >= 6)
err = gen6_drpc(m);
else
err = ilk_drpc(m);
}
return err;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);
void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_rps *rps = &gt->rps;
intel_wakeref_t wakeref;
wakeref = intel_runtime_pm_get(uncore->rpm);
if (GRAPHICS_VER(i915) == 5) {
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
MEMSTAT_VID_SHIFT);
drm_printf(p, "Current P-state: %d\n",
REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rgvstat));
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
u32 rpmodectl, freq_sts;
rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
drm_printf(p, "Video Turbo Mode: %s\n",
str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
drm_printf(p, "HW control enabled: %s\n",
str_yes_no(rpmodectl & GEN6_RP_ENABLE));
drm_printf(p, "SW control enabled: %s\n",
str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
vlv_punit_get(i915);
freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq);
drm_printf(p, "actual GPU freq: %d MHz\n",
intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
drm_printf(p, "current GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->cur_freq));
drm_printf(p, "max GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->max_freq));
drm_printf(p, "min GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->min_freq));
drm_printf(p, "idle GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->idle_freq));
drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
} else if (GRAPHICS_VER(i915) >= 6) {
gen6_rps_frequency_dump(rps, p);
} else {
drm_puts(p, "no P-state info available\n");
}
drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
intel_runtime_pm_put(uncore->rpm, wakeref);
}
static int frequency_show(struct seq_file *m, void *unused)
{
struct intel_gt *gt = m->private;
struct drm_printer p = drm_seq_file_printer(m);
intel_gt_pm_frequency_dump(gt, &p);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);
static int llc_show(struct seq_file *m, void *data)
{
struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
const bool edram = GRAPHICS_VER(i915) > 8;
struct intel_rps *rps = &gt->rps;
unsigned int max_gpu_freq, min_gpu_freq;
intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915)));
seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
i915->edram_size_mb);
min_gpu_freq = rps->min_freq;
max_gpu_freq = rps->max_freq;
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
/* Convert GT frequency to 50 MHz units */
min_gpu_freq /= GEN9_FREQ_SCALER;
max_gpu_freq /= GEN9_FREQ_SCALER;
}
seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(rps,
(gpu_freq *
(IS_GEN9_BC(i915) ||
GRAPHICS_VER(i915) >= 11 ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return 0;
}
static bool llc_eval(void *data)
{
struct intel_gt *gt = data;
return HAS_LLC(gt->i915);
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);
static const char *rps_power_to_str(unsigned int power)
{
static const char * const strings[] = {
[LOW_POWER] = "low power",
[BETWEEN] = "mixed",
[HIGH_POWER] = "high power",
};
if (power >= ARRAY_SIZE(strings) || !strings[power])
return "unknown";
return strings[power];
}
static int rps_boost_show(struct seq_file *m, void *data)
{
struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &gt->rps;
seq_printf(m, "RPS enabled? %s\n",
str_yes_no(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n",
str_yes_no(intel_rps_is_active(rps)));
seq_printf(m, "GPU busy? %s, %llums\n",
str_yes_no(gt->awake),
ktime_to_ms(intel_gt_get_awake_time(gt)));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Frequency requested %d, actual %d\n",
intel_gpu_freq(rps, rps->cur_freq),
intel_rps_read_actual_frequency(rps));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(rps, rps->min_freq),
intel_gpu_freq(rps, rps->min_freq_softlimit),
intel_gpu_freq(rps, rps->max_freq_softlimit),
intel_gpu_freq(rps, rps->max_freq));
seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
intel_gpu_freq(rps, rps->idle_freq),
intel_gpu_freq(rps, rps->efficient_freq),
intel_gpu_freq(rps, rps->boost_freq));
seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
struct intel_uncore *uncore = gt->uncore;
u32 rpup, rpupei;
u32 rpdown, rpdownei;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
rps_power_to_str(rps->power.mode));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
rpup && rpupei ? 100 * rpup / rpupei : 0,
rps->power.up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
rps->power.down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
}
return 0;
}
static bool rps_eval(void *data)
{
struct intel_gt *gt = data;
if (intel_guc_slpc_is_used(&gt->uc.guc))
return false;
else
return HAS_RPS(gt->i915);
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);
static int perf_limit_reasons_get(void *data, u64 *val)
{
struct intel_gt *gt = data;
intel_wakeref_t wakeref;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
*val = intel_uncore_read(gt->uncore, intel_gt_perf_limit_reasons_reg(gt));
return 0;
}
static int perf_limit_reasons_clear(void *data, u64 val)
{
struct intel_gt *gt = data;
intel_wakeref_t wakeref;
/*
* Clear the upper 16 "log" bits, the lower 16 "status" bits are
* read-only. The upper 16 "log" bits are identical to the lower 16
* "status" bits except that the "log" bits remain set until cleared.
*/
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
intel_uncore_rmw(gt->uncore, intel_gt_perf_limit_reasons_reg(gt),
GT0_PERF_LIMIT_REASONS_LOG_MASK, 0);
return 0;
}
static bool perf_limit_reasons_eval(void *data)
{
struct intel_gt *gt = data;
return i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt));
}
DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
perf_limit_reasons_clear, "0x%llx\n");
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "drpc", &drpc_fops, NULL },
{ "frequency", &frequency_fops, NULL },
{ "forcewake", &fw_domains_fops, NULL },
{ "forcewake_user", &forcewake_user_fops, NULL},
{ "llc", &llc_fops, llc_eval },
{ "rps_boost", &rps_boost_fops, rps_eval },
{ "perf_limit_reasons", &perf_limit_reasons_fops, perf_limit_reasons_eval },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2018 Intel Corporation
*/
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"
static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
int n;
/*
* Compute a power-of-two bucket, but throw everything greater than
* 16KiB into the same bucket: i.e. the buckets hold objects of
* (1 page, 2 pages, 4 pages, 8+ pages).
*/
n = fls(sz >> PAGE_SHIFT) - 1;
if (n >= ARRAY_SIZE(pool->cache_list))
n = ARRAY_SIZE(pool->cache_list) - 1;
return &pool->cache_list[n];
}
static void node_free(struct intel_gt_buffer_pool_node *node)
{
i915_gem_object_put(node->obj);
i915_active_fini(&node->active);
kfree_rcu(node, rcu);
}
static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
struct intel_gt_buffer_pool_node *node, *stale = NULL;
bool active = false;
int n;
/* Free buffers that have not been used in the past second */
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
struct list_head *list = &pool->cache_list[n];
if (list_empty(list))
continue;
if (spin_trylock_irq(&pool->lock)) {
struct list_head *pos;
/* Most recent at head; oldest at tail */
list_for_each_prev(pos, list) {
unsigned long age;
node = list_entry(pos, typeof(*node), link);
age = READ_ONCE(node->age);
if (!age || jiffies - age < keep)
break;
/* Check we are the first to claim this node */
if (!xchg(&node->age, 0))
break;
node->free = stale;
stale = node;
}
if (!list_is_last(pos, list))
__list_del_many(pos, list);
spin_unlock_irq(&pool->lock);
}
active |= !list_empty(list);
}
while ((node = stale)) {
stale = stale->free;
node_free(node);
}
return active;
}
static void pool_free_work(struct work_struct *wrk)
{
struct intel_gt_buffer_pool *pool =
container_of(wrk, typeof(*pool), work.work);
struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
if (pool_free_older_than(pool, HZ))
queue_delayed_work(gt->i915->unordered_wq, &pool->work,
round_jiffies_up_relative(HZ));
}
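/*
 * i915_active retirement callback: unpin the node's pages, mark the object
 * purgeable again, return the node to its size bucket and kick the delayed
 * worker that frees nodes left unused for roughly a second.
 */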
static void pool_retire(struct i915_active *ref)
{
struct intel_gt_buffer_pool_node *node =
container_of(ref, typeof(*node), active);
struct intel_gt_buffer_pool *pool = node->pool;
struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
struct list_head *list = bucket_for_size(pool, node->obj->base.size);
unsigned long flags;
if (node->pinned) {
i915_gem_object_unpin_pages(node->obj);
/* Return this object to the shrinker pool */
i915_gem_object_make_purgeable(node->obj);
node->pinned = false;
}
GEM_BUG_ON(node->age);
spin_lock_irqsave(&pool->lock, flags);
list_add_rcu(&node->link, list);
WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
spin_unlock_irqrestore(&pool->lock, flags);
queue_delayed_work(gt->i915->unordered_wq, &pool->work,
round_jiffies_up_relative(HZ));
}
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
assert_object_held(node->obj);
if (node->pinned)
return;
__i915_gem_object_pin_pages(node->obj);
/* Hide this pinned object from the shrinker until retired */
i915_gem_object_make_unshrinkable(node->obj);
node->pinned = true;
}
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
enum i915_map_type type)
{
struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
struct intel_gt_buffer_pool_node *node;
struct drm_i915_gem_object *obj;
node = kmalloc(sizeof(*node),
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!node)
return ERR_PTR(-ENOMEM);
node->age = 0;
node->pool = pool;
node->pinned = false;
i915_active_init(&node->active, NULL, pool_retire, 0);
obj = i915_gem_object_create_internal(gt->i915, sz);
if (IS_ERR(obj)) {
i915_active_fini(&node->active);
kfree(node);
return ERR_CAST(obj);
}
i915_gem_object_set_readonly(obj);
node->type = type;
node->obj = obj;
return node;
}
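/*
 * Grab a cached buffer of at least @size and matching map type from the
 * per-size bucket. Nodes are claimed locklessly under RCU by cmpxchg()ing
 * their age to 0; on a miss a fresh node is allocated instead.
 */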
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
enum i915_map_type type)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
struct intel_gt_buffer_pool_node *node;
struct list_head *list;
int ret;
size = PAGE_ALIGN(size);
list = bucket_for_size(pool, size);
rcu_read_lock();
list_for_each_entry_rcu(node, list, link) {
unsigned long age;
if (node->obj->base.size < size)
continue;
if (node->type != type)
continue;
age = READ_ONCE(node->age);
if (!age)
continue;
if (cmpxchg(&node->age, age, 0) == age) {
spin_lock_irq(&pool->lock);
list_del_rcu(&node->link);
spin_unlock_irq(&pool->lock);
break;
}
}
rcu_read_unlock();
if (&node->link == list) {
node = node_create(pool, size, type);
if (IS_ERR(node))
return node;
}
ret = i915_active_acquire(&node->active);
if (ret) {
node_free(node);
return ERR_PTR(ret);
}
return node;
}
void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
spin_lock_init(&pool->lock);
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
INIT_LIST_HEAD(&pool->cache_list[n]);
INIT_DELAYED_WORK(&pool->work, pool_free_work);
}
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
do {
while (pool_free_older_than(pool, 0))
;
} while (cancel_delayed_work_sync(&pool->work));
}
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
| linux-master | drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2018 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
/**
* DOC: Hardware workarounds
*
* Hardware workarounds are register programming documented to be executed in
* the driver that fall outside of the normal programming sequences for a
* platform. There are some basic categories of workarounds, depending on
* how/when they are applied:
*
* - Context workarounds: workarounds that touch registers that are
* saved/restored to/from the HW context image. The list is emitted (via Load
* Register Immediate commands) once when initializing the device and saved in
* the default context. That default context is then used on every context
* creation to have a "primed golden context", i.e. a context image that
* already contains the changes needed to all the registers.
*
* Context workarounds should be implemented in the \*_ctx_workarounds_init()
* variants respective to the targeted platforms.
*
* - Engine workarounds: the list of these WAs is applied whenever the specific
* engine is reset. It's also possible that a set of engine classes share a
* common power domain and they are reset together. This happens on some
* platforms with render and compute engines. In this case (at least) one of
* them needs to keep the workaround programming: the approach taken in the
* driver is to tie those workarounds to the first compute/render engine that
* is registered. When executing with GuC submission, engine resets are
* outside of kernel driver control, hence the list of registers involved is
* written once, on engine initialization, and then passed to GuC, which
* saves/restores their values before/after the reset takes place. See
* ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
*
* Workarounds for registers specific to RCS and CCS should be implemented in
* rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
* engine's MMIO range but that are part of the common RCS/CCS reset domain
* should be implemented in general_render_compute_wa_init().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
*
* GT workarounds should be implemented in the \*_gt_workarounds_init()
* variants respective to the targeted platforms.
*
* - Register whitelist: some workarounds need to be implemented in userspace,
* but need to touch privileged registers. The whitelist in the kernel
* instructs the hardware to allow the access to happen. From the kernel side,
* this is just a special case of an MMIO workaround (as we write the list of
* these to-be-whitelisted registers to some special HW registers).
*
* Register whitelisting should be done in the \*_whitelist_build() variants
* respective to the targeted platforms.
*
* - Workaround batchbuffers: buffers that get executed automatically by the
* hardware on every HW context restore. These buffers are created and
* programmed in the default context so the hardware always goes through those
* programming sequences when switching contexts. The support for workaround
* batchbuffers is enabled by these hardware mechanisms:
*
* #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
* context, pointing the hardware to jump to that location when that offset
* is reached in the context restore. Workaround batchbuffer in the driver
* currently uses this mechanism for all platforms.
*
* #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
* pointing the hardware to a buffer to continue executing after the
* engine registers are restored in a context restore sequence. This is
* currently not used in the driver.
*
* - Other: There are WAs that, due to their nature, cannot be applied from a
* central place. Those are peppered around the rest of the code, as needed.
* Workarounds related to the display IP are the main example.
*
* .. [1] Technically, some registers are power-context saved & restored, so they
* survive a suspend/resume. In practice, writing them again is not too
* costly and simplifies things, so it's the approach taken in the driver.
*/
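/*
 * As an orientation for the code below: every workaround list follows the
 * same lifecycle. Taking the GT list as an example (see
 * intel_gt_init_workarounds()), it is built with
 *
 * wa_init_start(wal, gt, "GT", "global");
 * gt_init_workarounds(gt, wal);
 * wa_init_finish(wal);
 *
 * and is later programmed with wa_list_apply() and cross-checked against the
 * hardware with wa_verify() after events that may have clobbered the
 * registers.
 */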
static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
const char *name, const char *engine_name)
{
wal->gt = gt;
wal->name = name;
wal->engine_name = engine_name;
}
#define WA_LIST_CHUNK (1 << 4)
static void wa_init_finish(struct i915_wa_list *wal)
{
/* Trim unused entries. */
if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
struct i915_wa *list = kmemdup(wal->list,
wal->count * sizeof(*list),
GFP_KERNEL);
if (list) {
kfree(wal->list);
wal->list = list;
}
}
if (!wal->count)
return;
drm_dbg(&wal->gt->i915->drm, "Initialized %u %s workarounds on %s\n",
wal->wa_count, wal->name, wal->engine_name);
}
static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
fw |= intel_uncore_forcewake_for_reg(uncore,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
return fw;
}
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
unsigned int addr = i915_mmio_reg_offset(wa->reg);
struct drm_i915_private *i915 = wal->gt->i915;
unsigned int start = 0, end = wal->count;
const unsigned int grow = WA_LIST_CHUNK;
struct i915_wa *wa_;
GEM_BUG_ON(!is_power_of_2(grow));
if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
struct i915_wa *list;
list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
GFP_KERNEL);
if (!list) {
drm_err(&i915->drm, "No space for workaround init!\n");
return;
}
if (wal->list) {
memcpy(list, wal->list, sizeof(*wa) * wal->count);
kfree(wal->list);
}
wal->list = list;
}
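/*
 * The list is kept sorted by mmio offset: binary-search for an existing
 * entry for this register and merge into it if one is found.
 */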
while (start < end) {
unsigned int mid = start + (end - start) / 2;
if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
start = mid + 1;
} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
end = mid;
} else {
wa_ = &wal->list[mid];
if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
drm_err(&i915->drm,
"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
i915_mmio_reg_offset(wa_->reg),
wa_->clr, wa_->set);
wa_->set &= ~wa->clr;
}
wal->wa_count++;
wa_->set |= wa->set;
wa_->clr |= wa->clr;
wa_->read |= wa->read;
return;
}
}
wal->wa_count++;
wa_ = &wal->list[wal->count++];
*wa_ = *wa;
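/* Bubble the newly appended entry down into its sorted position. */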
while (wa_-- > wal->list) {
GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
i915_mmio_reg_offset(wa_[1].reg));
if (i915_mmio_reg_offset(wa_[1].reg) >
i915_mmio_reg_offset(wa_[0].reg))
break;
swap(wa_[1], wa_[0]);
}
}
static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
struct i915_wa wa = {
.reg = reg,
.clr = clear,
.set = set,
.read = read_mask,
.masked_reg = masked_reg,
};
_wa_add(wal, &wa);
}
static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
struct i915_wa wa = {
.mcr_reg = reg,
.clr = clear,
.set = set,
.read = read_mask,
.masked_reg = masked_reg,
.is_mcr = 1,
};
_wa_add(wal, &wa);
}
static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
wa_add(wal, reg, clear, set, clear | set, false);
}
static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
wa_mcr_add(wal, reg, clear, set, clear | set, false);
}
static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
wa_write_clr_set(wal, reg, ~0, set);
}
static void
wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
wa_mcr_write_clr_set(wal, reg, ~0, set);
}
static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
wa_write_clr_set(wal, reg, set, set);
}
static void
wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
wa_mcr_write_clr_set(wal, reg, set, set);
}
static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
wa_write_clr_set(wal, reg, clr, 0);
}
static void
wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
{
wa_mcr_write_clr_set(wal, reg, clr, 0);
}
/*
* WA operations on "masked register". A masked register has the upper 16 bits
* documented as "masked" in b-spec. Its purpose is to allow writing to just a
* portion of the register without a rmw: you simply write in the upper 16 bits
* the mask of bits you are going to modify.
*
* The wa_masked_* family of functions already does the necessary operations to
* calculate the mask based on the parameters passed, so the user only has to
* provide the lower 16 bits of that register.
*/
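/*
 * For example, wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE)
 * queues a write of _MASKED_BIT_ENABLE(bit), which expands to (bit << 16) | bit:
 * the upper half selects the bit being touched and the lower half carries its
 * new value, so the register is updated without a read-modify-write.
 */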
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}
static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}
static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}
static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}
static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
u32 mask, u32 val)
{
wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}
static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
u32 mask, u32 val)
{
wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
/* WaDisablePartialInstShootdown:bdw,chv */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:bdw,chv */
/* WaHdcDisableFetchWhenMasked:bdw,chv */
wa_masked_en(wal, HDC_CHICKEN0,
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
HDC_FORCE_NON_COHERENT);
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
* polygons in the same 8x4 pixel/sample area to be processed without
* stalling waiting for the earlier ones to write to Hierarchical Z
* buffer."
*
* This optimization is off by default for BDW and CHV; turn it on.
*/
wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
/* Wa4x4STCOptimizationDisable:bdw,chv */
wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
wa_masked_field_set(wal, GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
}
static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* WaDisableDopClockGating:bdw
*
* Also see the related UCGTCL1 write in bdw_init_clock_gating()
* to disable EUTC clock gating.
*/
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
wa_masked_en(wal, HDC_CHICKEN0,
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
(IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
gen8_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:chv */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}
static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN9_PBE_COMPRESSED_HASH_SELECTION);
wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
}
/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
wa_masked_en(wal, CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
wa_masked_en(wal, HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
* both tied to WaForceContextSaveRestoreNonCoherent
* in some hsds for skl. We keep the tie for all gen9. The
* documentation is a bit hazy and so we want to get common behaviour,
* even though there is no clear evidence we would need both on kbl/bxt.
* This area has been a source of system hangs so we play it safe
* and mimic the skl regardless of what bspec says.
*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
* a TLB invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
wa_masked_en(wal, HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915))
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
/*
* Supporting preemption with fine-granularity requires changes in the
* batch buffer programming. Since we can't break old userspace, we
* need to set our default preemption level to a safe value. Userspace is
* still able to use more fine-grained preemption levels, since in
* WaEnablePreemptionGranularityControlByUMD we're whitelisting the
* per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
* not real HW workarounds, but merely a way to start using preemption
* while maintaining old contract with userspace.
*/
/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
if (IS_GEN9_LP(i915))
wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct intel_gt *gt = engine->gt;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
for (i = 0; i < 3; i++) {
u8 ss;
/*
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
continue;
/*
* subslice_7eu[i] != 0 (because of the check above) and
* ss_max == 4 (maximum number of subslices possible per slice)
*
* -> 0 <= ss <= 3;
*/
ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
wa_masked_field_set(wal, GEN7_GT_MODE,
GEN9_IZ_HASHING_MASK(2) |
GEN9_IZ_HASHING_MASK(1) |
GEN9_IZ_HASHING_MASK(0),
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
}
static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
gen9_ctx_workarounds_init(engine, wal);
skl_tune_iz_hashing(engine, wal);
}
static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
gen9_ctx_workarounds_init(engine, wal);
/* WaDisableThreadStallDopClockGating:bxt */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
STALL_DOP_GATING_DISABLE);
/* WaToEnableHwFixForPushConstHWBug:bxt */
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:glk */
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}
static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
gen9_ctx_workarounds_init(engine, wal);
/* WaToEnableHwFixForPushConstHWBug:cfl */
wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:cfl */
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/* Wa_1406697149 (WaDisableBankHangMode:icl) */
wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
/* WaForceEnableNonCoherent:icl
* This is not the same workaround as in early Gen9 platforms, where
* lacking this could cause system hangs, but coherency performance
* overhead is high and only a few compute workloads really need it
* (the register is whitelisted in hardware now, so UMDs can opt in
* for coherency if they have a good reason).
*/
wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
/* WaEnableFloatBlendOptimization:icl */
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
_MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
0 /* write-only, so skip validation */,
true);
/* WaDisableGPGPUMidThreadPreemption:icl */
wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/* allow headerless messages for preemptible GPGPU context */
wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
/* Wa_1604278689:icl,ehl */
wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
0,
0xFFFFFFFF);
/* Wa_1406306137:icl,ehl */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
/*
* These settings aren't actually workarounds, but general tuning settings that
* need to be programmed on dg2 platform.
*/
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
FF_MODE2_TDS_TIMER_128);
}
static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
/*
* Wa_1409142259:tgl,dg1,adl-p
* Wa_1409347922:tgl,dg1,adl-p
* Wa_1409252684:tgl,dg1,adl-p
* Wa_1409217633:tgl,dg1,adl-p
* Wa_1409207793:tgl,dg1,adl-p
* Wa_1409178076:tgl,dg1,adl-p
* Wa_1408979724:tgl,dg1,adl-p
* Wa_14010443199:tgl,rkl,dg1,adl-p
* Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
* Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
*/
wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
/* WaDisableGPGPUMidThreadPreemption:gen12 */
wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
GEN9_PREEMPT_GPGPU_LEVEL_MASK,
GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
/*
* Wa_16011163337 - GS_TIMER
*
* TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
* need to program it even on those that don't explicitly list that
* workaround.
*
* Note that the programming of GEN12_FF_MODE2 is further modified
* according to the FF_MODE2 guidance given by Wa_1608008084.
* Wa_1608008084 tells us the FF_MODE2 register will return the wrong
* value when read from the CPU.
*
* The default value for this register is zero for all fields.
* So instead of doing a RMW we should just write the desired values
* for TDS and GS timers. Note that since the readback can't be trusted,
* the clear mask is just set to ~0 to make sure other bits are not
* inadvertently set. For the same reason read verification is ignored.
*/
wa_add(wal,
GEN12_FF_MODE2,
~0,
FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
0, false);
if (!IS_DG1(i915)) {
/* Wa_1806527549 */
wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
/* Wa_1606376872 */
wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
}
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
gen12_ctx_workarounds_init(engine, wal);
/* Wa_1409044764 */
wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
/* Wa_22010493298 */
wa_masked_en(wal, HIZ_CHICKEN,
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}
static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
dg2_ctx_gt_tuning_init(engine, wal);
/* Wa_16011186671:dg2_g11 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
wa_mcr_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
wa_mcr_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
}
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
/* Wa_14010469329:dg2_g10 */
wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);
/*
* Wa_22010465075:dg2_g10
* Wa_22010613112:dg2_g10
* Wa_14010698770:dg2_g10
*/
wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
}
/* Wa_16013271637:dg2 */
wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
/* Wa_14014947963:dg2 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
/* Wa_18018764978:dg2 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
/* Wa_15010599737:dg2 */
wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
/* Wa_18019271663:dg2 */
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
static void mtl_ctx_gt_tuning_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
dg2_ctx_gt_tuning_init(engine, wal);
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}
static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
mtl_ctx_gt_tuning_init(engine, wal);
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
/* Wa_14014947963 */
wa_masked_field_set(wal, VF_PREEMPTION,
PREEMPTION_VERTEX_COUNT, 0x4000);
/* Wa_16013271637 */
wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
/* Wa_18019627453 */
wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
/* Wa_18018764978 */
wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
}
/* Wa_18019271663 */
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
/*
* This is a "fake" workaround defined by software to ensure we
* maintain reliable, backward-compatible behavior for userspace with
* regards to how nested MI_BATCH_BUFFER_START commands are handled.
*
* The per-context setting of MI_MODE[12] determines whether the bits
* of a nested MI_BATCH_BUFFER_START instruction should be interpreted
* in the traditional manner or whether they should instead use a new
* tgl+ meaning that breaks backward compatibility, but allows nesting
* into 3rd-level batchbuffers. When this new capability was first
* added in TGL, it remained off by default unless a context
* intentionally opted in to the new behavior. However Xe_HPG now
* flips this on by default and requires that we explicitly opt out if
* we don't want the new behavior.
*
* From a SW perspective, we want to maintain the backward-compatible
* behavior for userspace, so we'll apply a fake workaround to set it
* back to the legacy behavior on platforms where the hardware default
* is to break compatibility. At the moment there is no Linux
* userspace that utilizes third-level batchbuffers, so this avoids the need
* for any userspace changes; using the legacy meaning is the correct thing
* to do. If/when we have userspace
* consumers that want to utilize third-level batch nesting, we can
* provide a context parameter to allow them to opt-in.
*/
wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}
static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
u8 mocs;
/*
* Some blitter commands do not have a field for MOCS; those
* commands will use the MOCS index pointed to by BLIT_CCTL.
* BLIT_CCTL registers need to be programmed to un-cached.
*/
if (engine->class == COPY_ENGINE_CLASS) {
mocs = engine->gt->mocs.uc_index;
wa_write_clr_set(wal,
BLIT_CCTL(engine->mmio_base),
BLIT_CCTL_MASK,
BLIT_CCTL_MOCS(mocs, mocs));
}
}
/*
* gen12_ctx_gt_fake_wa_init() doesn't program an official workaround defined
* by the hardware team; rather, it programs general context registers.
* Routing that context register programming through the context workaround
* list allows us to use the wa framework for proper application and
* validation.
*/
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
fakewa_disable_nestedbb_mode(engine, wal);
gen12_ctx_gt_mocs_init(engine, wal);
}
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
struct i915_wa_list *wal,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
wa_init_start(wal, engine->gt, name, engine->name);
/* Applies to all engines */
/*
* Fake workarounds are not actual workarounds, but rather
* programming of context registers using the workaround framework.
*/
if (GRAPHICS_VER(i915) >= 12)
gen12_ctx_gt_fake_wa_init(engine, wal);
if (engine->class != RENDER_CLASS)
goto done;
if (IS_METEORLAKE(i915))
mtl_ctx_workarounds_init(engine, wal);
else if (IS_PONTEVECCHIO(i915))
; /* noop; none at this time */
else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
else if (IS_XEHPSDV(i915))
; /* noop; none at this time */
else if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 11)
icl_ctx_workarounds_init(engine, wal);
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_ctx_workarounds_init(engine, wal);
else if (IS_GEMINILAKE(i915))
glk_ctx_workarounds_init(engine, wal);
else if (IS_KABYLAKE(i915))
kbl_ctx_workarounds_init(engine, wal);
else if (IS_BROXTON(i915))
bxt_ctx_workarounds_init(engine, wal);
else if (IS_SKYLAKE(i915))
skl_ctx_workarounds_init(engine, wal);
else if (IS_CHERRYVIEW(i915))
chv_ctx_workarounds_init(engine, wal);
else if (IS_BROADWELL(i915))
bdw_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 7)
gen7_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 6)
gen6_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) < 8)
;
else
MISSING_CASE(GRAPHICS_VER(i915));
done:
wa_init_finish(wal);
}
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}
int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
struct intel_uncore *uncore = rq->engine->uncore;
enum forcewake_domains fw;
unsigned long flags;
struct i915_wa *wa;
unsigned int i;
u32 *cs;
int ret;
if (wal->count == 0)
return 0;
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
cs = intel_ring_begin(rq, (wal->count * 2 + 2));
if (IS_ERR(cs))
return PTR_ERR(cs);
fw = wal_get_fw_for_rmw(uncore, wal);
intel_gt_mcr_lock(wal->gt, &flags);
spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
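/*
 * Emit a single MI_LOAD_REGISTER_IMM carrying the final value for every
 * context workaround register, resolving the read-modify-write against
 * the current hardware state where a plain overwrite is not enough.
 */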
*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
u32 val;
/* Skip reading the register if it's not really needed */
if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
val = wa->set;
} else {
val = wa->is_mcr ?
intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
intel_uncore_read_fw(uncore, wa->reg);
val &= ~wa->clr;
val |= wa->set;
}
*cs++ = i915_mmio_reg_offset(wa->reg);
*cs++ = val;
}
*cs++ = MI_NOOP;
intel_uncore_forcewake_put__locked(uncore, fw);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(wal->gt, flags);
intel_ring_advance(rq, cs);
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
return 0;
}
static void
gen4_gt_workarounds_init(struct intel_gt *gt,
struct i915_wa_list *wal)
{
/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}
static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
gen4_gt_workarounds_init(gt, wal);
/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}
static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
g4x_gt_workarounds_init(gt, wal);
wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}
static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}
static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
wa_masked_dis(wal,
GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode:ivb */
wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
/* WaForceL3Serialization:ivb */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}
static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* WaForceL3Serialization:vlv */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
/*
* WaIncreaseL3CreditsForVLVB0:vlv
* This is the hardware default actually.
*/
wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}
static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* L3 caching of data atomics doesn't work -- disable it. */
wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
wa_add(wal,
HSW_ROW_CHICKEN3, 0,
_MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
0 /* XXX does this reg exist? */, true);
/* WaVSRefCountFullforceMissDisable:hsw */
wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}
static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
unsigned int slice, subslice;
u32 mcr, mcr_mask;
GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
/*
* WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
* Before any MMIO read into slice/subslice specific registers, MCR
* packet control register needs to be programmed to point to any
* enabled s/ss pair. Otherwise, incorrect values will be returned.
* This means each subsequent MMIO read will be forwarded to a
* specific s/ss combination, but this is OK since these registers
* are consistent across s/ss in almost all cases. On the rare
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
slice = ffs(sseu->slice_mask) - 1;
GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
GEM_BUG_ON(!subslice);
subslice--;
/*
* We use GEN8_MCR..() macros to calculate the |mcr| value for
* Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
*/
mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
gen9_wa_init_mcr(i915, wal);
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
GAM_ECOCHK,
ECOCHK_DIS_TLB);
if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
wa_write_or(wal,
MMCD_MISC_CTRL,
MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
wa_write_or(wal,
GAM_ECOCHK,
BDW_DISABLE_HDC_INVALIDATION);
}
static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
gen9_gt_workarounds_init(gt, wal);
/* WaDisableGafsUnitClkGating:skl */
wa_write_or(wal,
GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
gen9_gt_workarounds_init(gt, wal);
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
wa_write_or(wal,
GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
gen9_gt_workarounds_init(gt, wal);
}
static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
gen9_gt_workarounds_init(gt, wal);
/* WaDisableGafsUnitClkGating:cfl */
wa_write_or(wal,
GEN7_UCGCTL4,
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void __set_mcr_steering(struct i915_wa_list *wal,
i915_reg_t steering_reg,
unsigned int slice, unsigned int subslice)
{
u32 mcr, mcr_mask;
mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}
static void debug_dump_steering(struct intel_gt *gt)
{
struct drm_printer p = drm_debug_printer("MCR Steering:");
if (drm_debug_enabled(DRM_UT_DRIVER))
intel_gt_mcr_report_steering(&p, gt, false);
}
static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
unsigned int slice, unsigned int subslice)
{
__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
gt->default_steering.groupid = slice;
gt->default_steering.instanceid = subslice;
debug_dump_steering(gt);
}
static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned int subslice;
GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
/*
* Although a platform may have subslices, we need to always steer
* reads to the lowest instance that isn't fused off. When Render
* Power Gating is enabled, grabbing forcewake will only power up a
* single subslice (the "minconfig") if there isn't a real workload
* that needs to be run; this means that if we steer register reads to
* one of the higher subslices, we run the risk of reading back 0's or
* random garbage.
*/
subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
/*
* If the subslice we picked above also steers us to a valid L3 bank,
* then we can just rely on the default steering and won't need to
* worry about explicitly re-steering L3BANK reads later.
*/
if (gt->info.l3bank_mask & BIT(subslice))
gt->steering_table[L3BANK] = NULL;
__add_mcr_wa(gt, wal, 0, subslice);
}
static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned long slice, subslice = 0, slice_mask = 0;
u32 lncf_mask = 0;
int i;
/*
* On Xe_HP the steering increases in complexity. There are now several
* more units that require steering and we're not guaranteed to be able
* to find a common setting for all of them. These are:
* - GSLICE (fusable)
* - DSS (sub-unit within gslice; fusable)
* - L3 Bank (fusable)
* - MSLICE (fusable)
* - LNCF (sub-unit within mslice; always present if mslice is present)
*
* We'll do our default/implicit steering based on GSLICE (in the
* sliceid field) and DSS (in the subsliceid field). If we can
* find overlap between the valid MSLICE and/or LNCF values with
* a suitable GSLICE, then we can just re-use the default value and
* skip any explicit steering at runtime.
*
* We only need to look for overlap between GSLICE/MSLICE/LNCF to find
* a valid sliceid value. DSS steering is the only type of steering
* that utilizes the 'subsliceid' bits.
*
* Also note that, even though the steering domain is called "GSlice"
* and it is encoded in the register using the gslice format, the spec
* says that the combined (geometry | compute) fuse should be used to
* select the steering.
*/
/* Find the potential gslice candidates */
slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
GEN_DSS_PER_GSLICE);
/*
* Find the potential LNCF candidates. Either LNCF within a valid
* mslice is fine.
*/
for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
lncf_mask |= (0x3 << (i * 2));
/*
* Are there any sliceid values that work for both GSLICE and LNCF
* steering?
*/
if (slice_mask & lncf_mask) {
slice_mask &= lncf_mask;
gt->steering_table[LNCF] = NULL;
}
/* How about sliceid values that also work for MSLICE steering? */
if (slice_mask & gt->info.mslice_mask) {
slice_mask &= gt->info.mslice_mask;
gt->steering_table[MSLICE] = NULL;
}
if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
gt->steering_table[GAM] = NULL;
slice = __ffs(slice_mask);
subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
GEN_DSS_PER_GSLICE;
__add_mcr_wa(gt, wal, slice, subslice);
/*
* SQIDI ranges are special because they use different steering
* registers than everything else we work with. On XeHP SDV and
* DG2-G10, any value in the steering registers will work fine since
* all instances are present, but DG2-G11 only has SQIDI instances at
* ID's 2 and 3, so we need to steer to one of those. For simplicity
* we'll just steer to a hardcoded "2" since that value will work
* everywhere.
*/
__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
/*
* On DG2, GAM registers have a dedicated steering control register
* and must always be programmed to a hardcoded groupid of "1."
*/
if (IS_DG2(gt->i915))
__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
}
static void
pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
unsigned int dss;
/*
* Setup implicit steering for COMPUTE and DSS ranges to the first
* non-fused-off DSS. All other types of MCR registers will be
* explicitly steered.
*/
dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
__add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
}
static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
icl_wa_init_mcr(gt, wal);
/* WaModifyGamTlbPartitioning:icl */
wa_write_clr_set(wal,
GEN11_GACB_PERF_CTRL,
GEN11_HASH_CTRL_MASK,
GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
wa_write_or(wal,
GEN11_LSN_UNSLCVC,
GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
wa_write_or(wal,
GEN8_GAMW_ECO_DEV_RW_IA,
GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
/*
* Wa_1408615072:icl,ehl (vsunit)
* Wa_1407596294:icl,ehl (hsunit)
*/
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
/* Wa_1407352427:icl,ehl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
PSDUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl,ehl */
wa_mcr_write_or(wal,
GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
GWUNIT_CLKGATE_DIS);
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
wa_write_or(wal,
GEN11_SLICE_UNIT_LEVEL_CLKGATE,
L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
/*
* This is not a documented workaround, but rather an optimization
* to reduce sampler power.
*/
wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}
/*
* Though there are per-engine instances of these registers,
* they retain their value through engine resets and should
* only be provided on the GT workaround list rather than
* the engine-specific workaround list.
*/
static void
wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct intel_engine_cs *engine;
int id;
for_each_engine(engine, gt, id) {
if (engine->class != VIDEO_DECODE_CLASS ||
(engine->instance % 2))
continue;
wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
IECPUNIT_CLKGATE_DIS);
}
}
static void
gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
icl_wa_init_mcr(gt, wal);
/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
wa_14011060649(gt, wal);
/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
/*
* Wa_14015795083
*
* Firmware on some gen12 platforms locks the MISCCPCTL register,
* preventing i915 from modifying it for this workaround. Skip the
* readback verification for this workaround on debug builds; if the
* workaround doesn't stick due to firmware behavior, it's not an error
* that we want CI to flag.
*/
wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
0, 0, false);
}
static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
gen12_gt_workarounds_init(gt, wal);
/* Wa_1409420604:dg1 */
wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
/* Wa_1408615072:dg1 */
/* Empirical testing shows this register is unaffected by engine reset. */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}
static void
xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
xehp_init_mcr(gt, wal);
/* Wa_1409757795:xehpsdv */
wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
/* Wa_18011725039:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
}
/* Wa_16011155590:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
TSGUNIT_CLKGATE_DIS);
/* Wa_14011780169:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
GAMTLBVDBOX7_CLKGATE_DIS |
GAMTLBVDBOX6_CLKGATE_DIS |
GAMTLBVDBOX5_CLKGATE_DIS |
GAMTLBVDBOX4_CLKGATE_DIS |
GAMTLBVDBOX3_CLKGATE_DIS |
GAMTLBVDBOX2_CLKGATE_DIS |
GAMTLBVDBOX1_CLKGATE_DIS |
GAMTLBVDBOX0_CLKGATE_DIS |
GAMTLBKCR_CLKGATE_DIS |
GAMTLBGUC_CLKGATE_DIS |
GAMTLBBLT_CLKGATE_DIS);
wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
GAMTLBGFXA1_CLKGATE_DIS |
GAMTLBCOMPA0_CLKGATE_DIS |
GAMTLBCOMPA1_CLKGATE_DIS |
GAMTLBCOMPB0_CLKGATE_DIS |
GAMTLBCOMPB1_CLKGATE_DIS |
GAMTLBCOMPC0_CLKGATE_DIS |
GAMTLBCOMPC1_CLKGATE_DIS |
GAMTLBCOMPD0_CLKGATE_DIS |
GAMTLBCOMPD1_CLKGATE_DIS |
GAMTLBMERT_CLKGATE_DIS |
GAMTLBVEBOX3_CLKGATE_DIS |
GAMTLBVEBOX2_CLKGATE_DIS |
GAMTLBVEBOX1_CLKGATE_DIS |
GAMTLBVEBOX0_CLKGATE_DIS);
}
/* Wa_16012725990:xehpsdv */
if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
/* Wa_14011060649:xehpsdv */
wa_14011060649(gt, wal);
/* Wa_14012362059:xehpsdv */
wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_14014368820:xehpsdv */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
/* Wa_14010670810:xehpsdv */
wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct intel_engine_cs *engine;
int id;
xehp_init_mcr(gt, wal);
/* Wa_14011060649:dg2 */
wa_14011060649(gt, wal);
/*
* Although there are per-engine instances of these registers,
* they technically exist outside the engine itself and are not
* impacted by engine resets. Furthermore, they're part of the
* GuC blacklist so trying to treat them as engine workarounds
* will result in GuC initialization failure and a wedged GPU.
*/
for_each_engine(engine, gt, id) {
if (engine->class != VIDEO_DECODE_CLASS)
continue;
/* Wa_16010515920:dg2_g10 */
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base),
ALNUNIT_CLKGATE_DIS);
}
if (IS_DG2_G10(gt->i915)) {
/* Wa_22010523718:dg2 */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
CG3DDISCFEG_CLKGATE_DIS);
/* Wa_14011006942:dg2 */
wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
DSS_ROUTER_CLKGATE_DIS);
}
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012362059:dg2 */
wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
}
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
/* Wa_14010948348:dg2_g10 */
wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
/* Wa_14011037102:dg2_g10 */
wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS);
/* Wa_14011371254:dg2_g10 */
wa_mcr_write_or(wal, XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS);
/* Wa_14011431319:dg2_g10 */
wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
GAMTLBVDBOX7_CLKGATE_DIS |
GAMTLBVDBOX6_CLKGATE_DIS |
GAMTLBVDBOX5_CLKGATE_DIS |
GAMTLBVDBOX4_CLKGATE_DIS |
GAMTLBVDBOX3_CLKGATE_DIS |
GAMTLBVDBOX2_CLKGATE_DIS |
GAMTLBVDBOX1_CLKGATE_DIS |
GAMTLBVDBOX0_CLKGATE_DIS |
GAMTLBKCR_CLKGATE_DIS |
GAMTLBGUC_CLKGATE_DIS |
GAMTLBBLT_CLKGATE_DIS);
wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
GAMTLBGFXA1_CLKGATE_DIS |
GAMTLBCOMPA0_CLKGATE_DIS |
GAMTLBCOMPA1_CLKGATE_DIS |
GAMTLBCOMPB0_CLKGATE_DIS |
GAMTLBCOMPB1_CLKGATE_DIS |
GAMTLBCOMPC0_CLKGATE_DIS |
GAMTLBCOMPC1_CLKGATE_DIS |
GAMTLBCOMPD0_CLKGATE_DIS |
GAMTLBCOMPD1_CLKGATE_DIS |
GAMTLBMERT_CLKGATE_DIS |
GAMTLBVEBOX3_CLKGATE_DIS |
GAMTLBVEBOX2_CLKGATE_DIS |
GAMTLBVEBOX1_CLKGATE_DIS |
GAMTLBVEBOX0_CLKGATE_DIS);
/* Wa_14010569222:dg2_g10 */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
GAMEDIA_CLKGATE_DIS);
/* Wa_14011028019:dg2_g10 */
wa_mcr_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
/* Wa_14010680813:dg2_g10 */
wa_mcr_write_or(wal, XEHP_GAMSTLB_CTRL,
CONTROL_BLOCK_CLKGATE_DIS |
EGRESS_BLOCK_CLKGATE_DIS |
TAG_BLOCK_CLKGATE_DIS);
}
/* Wa_14014830051:dg2 */
wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
/* Wa_14015795083 */
wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
/* Wa_18018781329 */
wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_1509235366:dg2 */
wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
/* Wa_14010648519:dg2 */
wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}
static void
pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
pvc_init_mcr(gt, wal);
/* Wa_14015795083 */
wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
/* Wa_18018781329 */
wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_16016694945 */
wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Wa_14018778641 / Wa_18018781329 */
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) {
/* Wa_14014830051 */
wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
/* Wa_14015795083 */
wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
}
/*
* Unlike older platforms, we no longer set up implicit steering here;
* all MCR accesses are explicitly steered.
*/
debug_dump_steering(gt);
}
static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/*
* Wa_14018778641
* Wa_18018781329
*
* Note that although these registers are MCR on the primary
* GT, the media GT's versions are regular singleton registers.
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
debug_dump_steering(gt);
}
/*
* The bspec performance guide has recommended MMIO tuning settings. These
* aren't truly "workarounds" but we want to program them through the
* workaround infrastructure to make sure they're (re)applied at the proper
* times.
*
* The programming in this function is for settings that persist through
* engine resets and also are not part of any engine's register state context.
* I.e., settings that only need to be re-applied in the event of a full GT
* reset.
*/
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
if (IS_METEORLAKE(gt->i915)) {
if (gt->type != GT_MEDIA)
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
if (IS_PONTEVECCHIO(gt->i915)) {
wa_mcr_write(wal, XEHPC_L3SCRUB,
SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
}
if (IS_DG2(gt->i915)) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
}
static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = gt->i915;
gt_tuning_settings(gt, wal);
if (gt->type == GT_MEDIA) {
if (MEDIA_VER(i915) >= 13)
xelpmp_gt_workarounds_init(gt, wal);
else
MISSING_CASE(MEDIA_VER(i915));
return;
}
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
xelpg_gt_workarounds_init(gt, wal);
else if (IS_PONTEVECCHIO(i915))
pvc_gt_workarounds_init(gt, wal);
else if (IS_DG2(i915))
dg2_gt_workarounds_init(gt, wal);
else if (IS_XEHPSDV(i915))
xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
dg1_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
gen12_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 11)
icl_gt_workarounds_init(gt, wal);
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
cfl_gt_workarounds_init(gt, wal);
else if (IS_GEMINILAKE(i915))
glk_gt_workarounds_init(gt, wal);
else if (IS_KABYLAKE(i915))
kbl_gt_workarounds_init(gt, wal);
else if (IS_BROXTON(i915))
gen9_gt_workarounds_init(gt, wal);
else if (IS_SKYLAKE(i915))
skl_gt_workarounds_init(gt, wal);
else if (IS_HASWELL(i915))
hsw_gt_workarounds_init(gt, wal);
else if (IS_VALLEYVIEW(i915))
vlv_gt_workarounds_init(gt, wal);
else if (IS_IVYBRIDGE(i915))
ivb_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 6)
snb_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 5)
ilk_gt_workarounds_init(gt, wal);
else if (IS_G4X(i915))
g4x_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 4)
gen4_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) <= 8)
;
else
MISSING_CASE(GRAPHICS_VER(i915));
}
void intel_gt_init_workarounds(struct intel_gt *gt)
{
struct i915_wa_list *wal = &gt->wa_list;
wa_init_start(wal, gt, "GT", "global");
gt_init_workarounds(gt, wal);
wa_init_finish(wal);
}
static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
const char *name, const char *from)
{
if ((cur ^ wa->set) & wa->read) {
drm_err(&gt->i915->drm,
"%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
cur, cur & wa->read, wa->set & wa->read);
return false;
}
return true;
}
static void wa_list_apply(const struct i915_wa_list *wal)
{
struct intel_gt *gt = wal->gt;
struct intel_uncore *uncore = gt->uncore;
enum forcewake_domains fw;
unsigned long flags;
struct i915_wa *wa;
unsigned int i;
if (!wal->count)
return;
fw = wal_get_fw_for_rmw(uncore, wal);
intel_gt_mcr_lock(gt, &flags);
spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
u32 val, old = 0;
/* open-coded rmw due to steering */
if (wa->clr)
old = wa->is_mcr ?
intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
intel_uncore_read_fw(uncore, wa->reg);
val = (old & ~wa->clr) | wa->set;
if (val != old || !wa->clr) {
if (wa->is_mcr)
intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
else
intel_uncore_write_fw(uncore, wa->reg, val);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
u32 val = wa->is_mcr ?
intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
intel_uncore_read_fw(uncore, wa->reg);
wa_verify(gt, wa, val, wal->name, "application");
}
}
intel_uncore_forcewake_put__locked(uncore, fw);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(gt, flags);
}
void intel_gt_apply_workarounds(struct intel_gt *gt)
{
wa_list_apply(&gt->wa_list);
}
static bool wa_list_verify(struct intel_gt *gt,
const struct i915_wa_list *wal,
const char *from)
{
struct intel_uncore *uncore = gt->uncore;
struct i915_wa *wa;
enum forcewake_domains fw;
unsigned long flags;
unsigned int i;
bool ok = true;
fw = wal_get_fw_for_rmw(uncore, wal);
intel_gt_mcr_lock(gt, &flags);
spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
intel_uncore_read_fw(uncore, wa->reg),
wal->name, from);
intel_uncore_forcewake_put__locked(uncore, fw);
spin_unlock(&uncore->lock);
intel_gt_mcr_unlock(gt, flags);
return ok;
}
bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
return wa_list_verify(gt, &gt->wa_list, from);
}
__maybe_unused
static bool is_nonpriv_flags_valid(u32 flags)
{
/* Check only valid flag bits are set */
if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
return false;
/* NB: Only 3 out of 4 enum values are valid for access field */
if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
return false;
return true;
}
static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
struct i915_wa wa = {
.reg = reg
};
if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
return;
if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
return;
wa.reg.reg |= flags;
_wa_add(wal, &wa);
}
static void
whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
{
struct i915_wa wa = {
.mcr_reg = reg,
.is_mcr = 1,
};
if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
return;
if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
return;
wa.mcr_reg.reg |= flags;
_wa_add(wal, &wa);
}
static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
static void
whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
{
whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
whitelist_reg(w, GEN8_CS_CHICKEN1);
/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
whitelist_reg(w, GEN8_HDC_CHICKEN1);
/* WaSendPushConstantsFromMMIO:skl,bxt */
whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}
static void skl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
if (engine->class != RENDER_CLASS)
return;
gen9_whitelist_build(w);
/* WaDisableLSQCROPERFforOCL:skl */
whitelist_mcr_reg(w, GEN8_L3SQCREG4);
}
static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
if (engine->class != RENDER_CLASS)
return;
gen9_whitelist_build(&engine->whitelist);
}
static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
if (engine->class != RENDER_CLASS)
return;
gen9_whitelist_build(w);
/* WaDisableLSQCROPERFforOCL:kbl */
whitelist_mcr_reg(w, GEN8_L3SQCREG4);
}
static void glk_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
if (engine->class != RENDER_CLASS)
return;
gen9_whitelist_build(w);
/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
if (engine->class != RENDER_CLASS)
return;
gen9_whitelist_build(w);
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
* - PS_INVOCATION_COUNT_UDW
* - PS_DEPTH_COUNT
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
}
static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
if (engine->class != RENDER_CLASS)
whitelist_reg_ext(w,
RING_CTX_TIMESTAMP(engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
}
static void cml_whitelist_build(struct intel_engine_cs *engine)
{
allow_read_ctx_timestamp(engine);
cfl_whitelist_build(engine);
}
static void icl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
allow_read_ctx_timestamp(engine);
switch (engine->class) {
case RENDER_CLASS:
/* WaAllowUMDToModifyHalfSliceChicken7:icl */
whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
/* WaAllowUMDToModifySamplerMode:icl */
whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
/* WaEnableStateCacheRedirectToCS:icl */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
* - PS_INVOCATION_COUNT_UDW
* - PS_DEPTH_COUNT
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
/* hucStatusRegOffset */
whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucUKernelHdrInfoRegOffset */
whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
break;
}
}
static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
allow_read_ctx_timestamp(engine);
switch (engine->class) {
case RENDER_CLASS:
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
* Wa_1408556865:tgl
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
* - PS_INVOCATION_COUNT_UDW
* - PS_DEPTH_COUNT
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
/*
* Wa_1808121037:tgl
* Wa_14012131227:dg1
* Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
*/
whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
/* Wa_1806527549:tgl */
whitelist_reg(w, HIZ_CHICKEN);
/* Required by recommended tuning setting (not a workaround) */
whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
break;
default:
break;
}
}
static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
switch (engine->class) {
case RENDER_CLASS:
/*
* Wa_1507100340:dg2_g10
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
* - PS_INVOCATION_COUNT_UDW
* - PS_DEPTH_COUNT
* - PS_DEPTH_COUNT_UDW
*/
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
break;
case COMPUTE_CLASS:
/* Wa_16011157294:dg2_g10 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
break;
default:
break;
}
}
static void blacklist_trtt(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
/*
* Prevent read/write access to [0x4400, 0x4600) which covers
* the TRTT range across all engines. Note that normally userspace
* cannot access the other engines' trtt control, but for simplicity
* we cover the entire range on each engine.
*/
whitelist_reg_ext(w, _MMIO(0x4400),
RING_FORCE_TO_NONPRIV_DENY |
RING_FORCE_TO_NONPRIV_RANGE_64);
whitelist_reg_ext(w, _MMIO(0x4500),
RING_FORCE_TO_NONPRIV_DENY |
RING_FORCE_TO_NONPRIV_RANGE_64);
}
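/*
 * Range arithmetic for the two entries above (illustrative, derived from the
 * code): RING_FORCE_TO_NONPRIV_RANGE_64 denies a block of 64 consecutive
 * dword registers, i.e. 64 * 4 = 0x100 bytes, so the entries at 0x4400 and
 * 0x4500 together cover exactly [0x4400, 0x4600).
 */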
static void pvc_whitelist_build(struct intel_engine_cs *engine)
{
/* Wa_16014440446:pvc */
blacklist_trtt(engine);
}
static void mtl_whitelist_build(struct intel_engine_cs *engine)
{
struct i915_wa_list *w = &engine->whitelist;
switch (engine->class) {
case RENDER_CLASS:
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
break;
default:
break;
}
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
wa_init_start(w, engine->gt, "whitelist", engine->name);
if (IS_METEORLAKE(i915))
mtl_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
else if (IS_XEHPSDV(i915))
; /* none needed */
else if (GRAPHICS_VER(i915) == 12)
tgl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) == 11)
icl_whitelist_build(engine);
else if (IS_COMETLAKE(i915))
cml_whitelist_build(engine);
else if (IS_COFFEELAKE(i915))
cfl_whitelist_build(engine);
else if (IS_GEMINILAKE(i915))
glk_whitelist_build(engine);
else if (IS_KABYLAKE(i915))
kbl_whitelist_build(engine);
else if (IS_BROXTON(i915))
bxt_whitelist_build(engine);
else if (IS_SKYLAKE(i915))
skl_whitelist_build(engine);
else if (GRAPHICS_VER(i915) <= 8)
;
else
MISSING_CASE(GRAPHICS_VER(i915));
wa_init_finish(w);
}
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
const struct i915_wa_list *wal = &engine->whitelist;
struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
struct i915_wa *wa;
unsigned int i;
if (!wal->count)
return;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
intel_uncore_write(uncore,
RING_FORCE_TO_NONPRIV(base, i),
i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
intel_uncore_write(uncore,
RING_FORCE_TO_NONPRIV(base, i),
i915_mmio_reg_offset(RING_NOPID(base)));
}
/*
* engine_fake_wa_init(), a placeholder used to program registers
* which are not part of an official workaround defined by the
* hardware team.
* Programming those registers through the workaround framework
* allows proper application and verification.
*/
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
u8 mocs_w, mocs_r;
/*
* RING_CMD_CCTL specifies the default MOCS entry that will be used
* by the command streamer when executing commands that don't have
* a way to explicitly specify a MOCS setting. The default should
* usually reference whichever MOCS entry corresponds to uncached
* behavior, although use of a WB cached entry is recommended by the
* spec in certain circumstances on specific platforms.
*/
if (GRAPHICS_VER(engine->i915) >= 12) {
mocs_r = engine->gt->mocs.uc_index;
mocs_w = engine->gt->mocs.uc_index;
if (HAS_L3_CCS_READ(engine->i915) &&
engine->class == COMPUTE_CLASS) {
mocs_r = engine->gt->mocs.wb_index;
/*
* Even on the few platforms where MOCS 0 is a
* legitimate table entry, it's never the correct
* setting to use here; we can assume the MOCS init
* just forgot to initialize wb_index.
*/
drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
}
wa_masked_field_set(wal,
RING_CMD_CCTL(engine->mmio_base),
CMD_CCTL_MOCS_MASK,
CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
}
}
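/*
 * Summary of the programming above (derived from the code, not from the
 * hardware documentation): the two MOCS indices packed into RING_CMD_CCTL
 * select the table entries the command streamer uses for its own writes
 * (mocs_w) and reads (mocs_r).  Both normally point at the uncached entry;
 * only compute engines on platforms with HAS_L3_CCS_READ switch the read
 * index over to the write-back entry.
 */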
static bool needs_wa_1308578152(struct intel_engine_cs *engine)
{
return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >=
GEN_DSS_PER_GSLICE;
}
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
/* Wa_22014600077 */
wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
ENABLE_EU_COUNT_FOR_TDL_FLUSH);
}
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
/* Wa_1509727124 */
wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
SC_DISABLE_POWER_OPTIMIZATION_EBB);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915) ||
IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0)) {
/* Wa_22012856258 */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
GEN12_DISABLE_READ_SUPPRESSION);
}
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14013392000:dg2_g11 */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4,
GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
}
/* Wa_1308578152:dg2_g10 when first gslice is fused off */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
needs_wa_1308578152(engine)) {
wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
GEN12_REPLAY_MODE_GRANULARITY);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
/*
* Wa_22010960976:dg2
* Wa_14013347512:dg2
*/
wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
/*
* Wa_1608949956:dg2_g10
* Wa_14010198302:dg2_g10
*/
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
/* Wa_22010430635:dg2 */
wa_mcr_masked_en(wal,
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
/* Wa_14013202645:dg2 */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
wa_mcr_write_or(wal, RT_CTRL, DIS_NULL_QUERY);
/* Wa_22012532006:dg2 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
IS_DG2_G10(i915)) {
/* Wa_22014600077:dg2 */
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
_MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
0 /* Wa_14012342262 write-only reg, so skip verification */,
true);
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
/*
* Wa_1407928979:tgl A*
* Wa_18011464164:tgl[B0+],dg1[B0+]
* Wa_22010931296:tgl[B0+],dg1[B0+]
* Wa_14010919138:rkl,dg1,adl-s,adl-p
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1606700617:tgl,dg1,adl-p
* Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
* Wa_14010826681:tgl,dg1,rkl,adl-p
* Wa_18019627453:dg2
*/
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/* Wa_1409804808 */
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
GEN12_PUSH_CONST_DEREF_HOLD_DIS);
/* Wa_14010229206 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
}
if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
/*
* Wa_1607297627
*
* On TGL and RKL there are multiple entries for this WA in the
* BSpec; some indicate this is an A0-only WA, others indicate
* it applies to all steppings so we trust the "all steppings."
*/
wa_masked_en(wal,
RING_PSMI_CTL(RENDER_RING_BASE),
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
}
if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
wa_mcr_masked_en(wal,
GEN10_SAMPLER_MODE,
ENABLE_SMALLPL);
}
if (GRAPHICS_VER(i915) == 11) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
_3D_CHICKEN3,
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/*
* Wa_1405543622:icl
* Formerly known as WaGAPZPriorityScheme
*/
wa_write_or(wal,
GEN8_GARBCNTL,
GEN11_ARBITRATION_PRIO_ORDER_MASK);
/*
* Wa_1604223664:icl
* Formerly known as WaL3BankAddressHashing
*/
wa_write_clr_set(wal,
GEN8_GARBCNTL,
GEN11_HASH_CTRL_EXCL_MASK,
GEN11_HASH_CTRL_EXCL_BIT0);
wa_write_clr_set(wal,
GEN11_GLBLINVL,
GEN11_BANK_HASH_ADDR_EXCL_MASK,
GEN11_BANK_HASH_ADDR_EXCL_BIT0);
/*
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
wa_mcr_write_or(wal,
GEN8_L3SQCREG4,
GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* Wa_1606682166:icl */
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
/* Wa_1409178092:icl */
wa_mcr_write_clr_set(wal,
GEN11_SCRATCH2,
GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
0);
/* WaEnable32PlaneMode:icl */
wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
GEN11_ENABLE_32_PLANE_MODE);
/*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]
*/
wa_write_or(wal,
GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
/* Wa_22010271021 */
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
FF_DOP_CLOCK_GATE_DISABLE);
}
/*
* Intel platforms that support fine-grained preemption (i.e., gen9 and
* beyond) allow the kernel-mode driver to choose between two different
* options for controlling preemption granularity and behavior.
*
* Option 1 (hardware default):
* Preemption settings are controlled in a global manner via
* kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
* and settings chosen by the kernel-mode driver will apply to all
* userspace clients.
*
* Option 2:
* Preemption settings are controlled on a per-context basis via
* register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
* context switch and is writable by userspace (e.g., via
* MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
* which allows different userspace drivers/clients to select
* different settings, or to change those settings on the fly in
* response to runtime needs. This option was known by name
* "FtrPerCtxtPreemptionGranularityControl" at one time, although
* that name is somewhat misleading as other non-granularity
* preemption settings are also impacted by this decision.
*
* On Linux, our policy has always been to let userspace drivers
* control preemption granularity/settings (Option 2). This was
* originally mandatory on gen9 to prevent ABI breakage (old gen9
* userspace developed before object-level preemption was enabled would
* not behave well if i915 were to go with Option 1 and enable that
* preemption in a global manner). On gen9 each context would have
* object-level preemption disabled by default (see
* WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
* userspace drivers could opt-in to object-level preemption as they
* saw fit. For post-gen9 platforms, we continue to utilize Option 2;
* even though it is no longer necessary for ABI compatibility when
* enabling a new platform, it does ensure that userspace will be able
* to implement any workarounds that show up requiring temporary
* adjustments to preemption behavior at runtime.
*
* Notes/Workarounds:
* - Wa_14015141709: On DG2 and early steppings of MTL,
* CS_CHICKEN1[0] does not disable object-level preemption as
* it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
* using Option 1). Effectively this means userspace is unable
* to disable object-level preemption on these platforms/steppings
* despite the setting here.
*
* - Wa_16013994831: May require that userspace program
* CS_CHICKEN1[10] when certain runtime conditions are true.
* Userspace requires Option 2 to be in effect for their update of
* CS_CHICKEN1[10] to be effective.
*
* Other workarounds may appear in the future that will also require
* Option 2 behavior to allow proper userspace implementation.
*/
if (GRAPHICS_VER(i915) >= 9)
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
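/*
 * Illustrative sketch of how Option 2 is consumed (an assumption about
 * typical userspace usage, not something enforced here): with per-context
 * control selected, a user batch can adjust its own preemption behaviour by
 * writing CS_CHICKEN1 from the batch buffer, e.g.
 *
 *   MI_LOAD_REGISTER_IMM(1)
 *   0x2580      ; CS_CHICKEN1, saved/restored with the context image
 *   <value>     ; desired preemption setting
 *
 * and the setting then only affects that context's image.
 */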
if (IS_SKYLAKE(i915) ||
IS_KABYLAKE(i915) ||
IS_COFFEELAKE(i915) ||
IS_COMETLAKE(i915)) {
/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
wa_write_or(wal,
GEN8_GARBCNTL,
GEN9_GAPS_TSV_CREDIT_DISABLE);
}
if (IS_BROXTON(i915)) {
/* WaDisablePooledEuLoadBalancingFix:bxt */
wa_masked_en(wal,
FF_SLICE_CS_CHICKEN2,
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
if (GRAPHICS_VER(i915) == 9) {
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
wa_masked_en(wal,
GEN9_CSFE_CHICKEN1_RCS,
GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
wa_mcr_write_or(wal,
BDW_SCRATCH1,
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
if (IS_GEN9_LP(i915))
wa_mcr_write_clr_set(wal,
GEN8_L3SQCREG1,
L3_PRIO_CREDITS_MASK,
L3_GENERAL_PRIO_CREDITS(62) |
L3_HIGH_PRIO_CREDITS(2));
/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
wa_mcr_write_or(wal,
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
/* Disable atomics in L3 to prevent unrecoverable hangs */
wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
EVICTION_PERF_FIX_ENABLE, 0);
}
if (IS_HASWELL(i915)) {
/* WaSampleCChickenBitEnable:hsw */
wa_masked_en(wal,
HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
wa_masked_dis(wal,
CACHE_MODE_0_GEN7,
/* enable HiZ Raw Stall Optimization */
HIZ_RAW_STALL_OPT_DISABLE);
}
if (IS_VALLEYVIEW(i915)) {
/* WaDisableEarlyCull:vlv */
wa_masked_en(wal,
_3D_CHICKEN3,
_3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
/*
* WaVSThreadDispatchOverride:ivb,vlv
*
* This actually overrides the dispatch
* mode for all thread types.
*/
wa_write_clr_set(wal,
GEN7_FF_THREAD_MODE,
GEN7_FF_SCHED_MASK,
GEN7_FF_TS_SCHED_HW |
GEN7_FF_VS_SCHED_HW |
GEN7_FF_DS_SCHED_HW);
/* WaPsdDispatchEnable:vlv */
/* WaDisablePSDDualDispatchEnable:vlv */
wa_masked_en(wal,
GEN7_HALF_SLICE_CHICKEN1,
GEN7_MAX_PS_THREAD_DEP |
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
}
if (IS_IVYBRIDGE(i915)) {
/* WaDisableEarlyCull:ivb */
wa_masked_en(wal,
_3D_CHICKEN3,
_3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
if (0) { /* causes HiZ corruption on ivb:gt1 */
/* enable HiZ Raw Stall Optimization */
wa_masked_dis(wal,
CACHE_MODE_0_GEN7,
HIZ_RAW_STALL_OPT_DISABLE);
}
/*
* WaVSThreadDispatchOverride:ivb,vlv
*
* This actually overrides the dispatch
* mode for all thread types.
*/
wa_write_clr_set(wal,
GEN7_FF_THREAD_MODE,
GEN7_FF_SCHED_MASK,
GEN7_FF_TS_SCHED_HW |
GEN7_FF_VS_SCHED_HW |
GEN7_FF_DS_SCHED_HW);
/* WaDisablePSDDualDispatchEnable:ivb */
if (IS_IVB_GT1(i915))
wa_masked_en(wal,
GEN7_HALF_SLICE_CHICKEN1,
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
}
if (GRAPHICS_VER(i915) == 7) {
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
wa_masked_en(wal,
RING_MODE_GEN7(RENDER_RING_BASE),
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
/*
* BSpec says this must be set, even though
* WaDisable4x2SubspanOptimization:ivb,hsw
* WaDisable4x2SubspanOptimization isn't listed for VLV.
*/
wa_masked_en(wal,
CACHE_MODE_1,
PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
wa_masked_field_set(wal,
GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
}
if (IS_GRAPHICS_VER(i915, 6, 7))
/*
* We need to disable the AsyncFlip performance optimisations in
* order to use MI_WAIT_FOR_EVENT within the CS. It should
* already be programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
wa_masked_en(wal,
RING_MI_MODE(RENDER_RING_BASE),
ASYNC_FLIP_PERF_DISABLE);
if (GRAPHICS_VER(i915) == 6) {
/*
* Required for the hardware to program scanline values for
* waiting
* WaEnableFlushTlbInvalidationMode:snb
*/
wa_masked_en(wal,
GFX_MODE,
GFX_TLB_INVALIDATE_EXPLICIT);
/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
wa_masked_en(wal,
_3D_CHICKEN,
_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
wa_masked_en(wal,
_3D_CHICKEN3,
/* WaStripsFansDisableFastClipPerformanceFix:snb */
_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
/*
* Bspec says:
* "This bit must be set if 3DSTATE_CLIP clip mode is set
* to normal and 3DSTATE_SF number of SF output attributes
* is more than 16."
*/
_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
wa_masked_field_set(wal,
GEN6_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
/* WaDisable_RenderCache_OperationalFlush:snb */
wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
* policy is not supported."
*/
wa_masked_dis(wal,
CACHE_MODE_0,
CM0_STC_EVICT_DISABLE_LRA_SNB);
}
if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
if (GRAPHICS_VER(i915) == 4)
/*
* Disable CONSTANT_BUFFER before it is loaded from the context
* image. As it is loaded, it is executed and the stored
* address may no longer be valid, leading to a GPU hang.
*
* This imposes the requirement that userspace reload their
* CONSTANT_BUFFER on every batch, fortunately a requirement
* they are already accustomed to from before contexts were
* enabled.
*/
wa_add(wal, ECOSKPD(RENDER_RING_BASE),
0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
0 /* XXX bit doesn't stick on Broadwater */,
true);
}
static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
wa_write(wal,
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
}
}
static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
/* Wa_14014999345:pvc */
wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
}
}
/*
* The bspec performance guide has recommended MMIO tuning settings. These
* aren't truly "workarounds" but we want to program them with the same
* workaround infrastructure to ensure that they're automatically added to
* the GuC save/restore lists, re-applied at the right times, and checked for
* any conflicting programming requested by real workarounds.
*
* Programming settings should be added here only if their registers are not
* part of an engine's register state context. If a register is part of a
* context, then any tuning settings should be programmed in an appropriate
* function invoked by __intel_engine_init_ctx_wa().
*/
static void
add_render_compute_tuning_settings(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
if (IS_METEORLAKE(i915) || IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
/*
* This tuning setting proves beneficial only on ATS-M designs; the
* default "age based" setting is optimal on regular DG2 and other
* platforms.
*/
if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
THREAD_EX_ARB_MODE_RR_AFTER_DEP);
if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
* specific engine. Since all render+compute engines get reset
* together, and the contents of these registers are lost during
* the shared render domain reset, we'll define such workarounds
* here and then add them to just a single RCS or CCS engine's
* workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag).
*/
static void
general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
add_render_compute_tuning_settings(i915, wal);
if (GRAPHICS_VER(i915) >= 11) {
/* This is not a Wa (although referred to as
* WaSetInidrectStateOverride in places), this allows
* applications that reference sampler states through
* the BindlessSamplerStateBaseAddress to have their
* border color relative to DynamicStateBaseAddress
* rather than BindlessSamplerStateBaseAddress.
*
* Otherwise SAMPLER_STATE border colors have to be
* copied in multiple heaps (DynamicStateBaseAddress &
* BindlessSamplerStateBaseAddress)
*
* BSpec: 46052
*/
wa_mcr_masked_en(wal,
GEN10_SAMPLER_MODE,
GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
}
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
/*
* Wa_14017066071
* Wa_14017654203
*/
wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
MTL_DISABLE_SAMPLER_SC_OOO);
if (IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
/* Wa_22015279794 */
wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
DISABLE_PREFETCH_INTO_IC);
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
/* Wa_22013037850 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
DISABLE_128B_EVICTION_COMMAND_UDW);
}
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
IS_PONTEVECCHIO(i915) ||
IS_DG2(i915)) {
/* Wa_22014226127 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
}
if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
IS_DG2(i915)) {
/* Wa_18017747507 */
wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
IS_DG2_G11(i915)) {
/*
* Wa_22012826095:dg2
* Wa_22013059131:dg2
*/
wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
MAXREQS_PER_BANK,
REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
/* Wa_22013059131:dg2 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
/*
* Wa_14010918519:dg2_g10
*
* LSC_CHICKEN_BIT_0 always reads back as 0 on this stepping,
* so ignoring verification.
*/
wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
0, false);
}
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
GEN8_ROW_CHICKEN,
SYSTOLIC_DOP_CLOCK_GATING_DIS);
/* Wa_1607196519 */
wa_mcr_masked_en(wal,
GEN9_ROW_CHICKEN4,
GEN12_DISABLE_GRF_CLEAR);
/* Wa_14010449647:xehpsdv */
wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
}
if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
/* Wa_14015227452:dg2,pvc */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
/* Wa_16015675438:dg2,pvc */
wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
}
if (IS_DG2(i915)) {
/*
* Wa_16011620976:dg2_g11
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) || IS_DG2_G11(i915))
/*
* Wa_22012654132
*
* Note that register 0xE420 is write-only and cannot be read
* back for verification on DG2 (due to Wa_14012342262), so
* we need to explicitly skip the readback.
*/
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
_MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
0 /* write-only, so skip validation */,
true);
}
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
if (GRAPHICS_VER(engine->i915) < 4)
return;
engine_fake_wa_init(engine, wal);
/*
* These are common workarounds that just need to be applied
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
general_render_compute_wa_init(engine, wal);
if (engine->class == COMPUTE_CLASS)
ccs_engine_wa_init(engine, wal);
else if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
}
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
wa_init_start(wal, engine->gt, "engine", engine->name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
wa_list_apply(&engine->wa_list);
}
static const struct i915_range mcr_ranges_gen8[] = {
{ .start = 0x5500, .end = 0x55ff },
{ .start = 0x7000, .end = 0x7fff },
{ .start = 0x9400, .end = 0x97ff },
{ .start = 0xb000, .end = 0xb3ff },
{ .start = 0xe000, .end = 0xe7ff },
{},
};
static const struct i915_range mcr_ranges_gen12[] = {
{ .start = 0x8150, .end = 0x815f },
{ .start = 0x9520, .end = 0x955f },
{ .start = 0xb100, .end = 0xb3ff },
{ .start = 0xde80, .end = 0xe8ff },
{ .start = 0x24a00, .end = 0x24a7f },
{},
};
static const struct i915_range mcr_ranges_xehp[] = {
{ .start = 0x4000, .end = 0x4aff },
{ .start = 0x5200, .end = 0x52ff },
{ .start = 0x5400, .end = 0x7fff },
{ .start = 0x8140, .end = 0x815f },
{ .start = 0x8c80, .end = 0x8dff },
{ .start = 0x94d0, .end = 0x955f },
{ .start = 0x9680, .end = 0x96ff },
{ .start = 0xb000, .end = 0xb3ff },
{ .start = 0xc800, .end = 0xcfff },
{ .start = 0xd800, .end = 0xd8ff },
{ .start = 0xdc00, .end = 0xffff },
{ .start = 0x17000, .end = 0x17fff },
{ .start = 0x24a00, .end = 0x24a7f },
{},
};
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
const struct i915_range *mcr_ranges;
int i;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
mcr_ranges = mcr_ranges_xehp;
else if (GRAPHICS_VER(i915) >= 12)
mcr_ranges = mcr_ranges_gen12;
else if (GRAPHICS_VER(i915) >= 8)
mcr_ranges = mcr_ranges_gen8;
else
return false;
/*
* Registers in these ranges are affected by the MCR selector
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
for (i = 0; mcr_ranges[i].start; i++)
if (offset >= mcr_ranges[i].start &&
offset <= mcr_ranges[i].end)
return true;
return false;
}
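/*
 * Example of the lookup above (illustrative): on a gen12 part, offset 0x9520
 * falls inside the { 0x9520, 0x955f } entry of mcr_ranges_gen12, so
 * mcr_range() returns true and the register is skipped by the CS-based
 * verification below, while e.g. 0x2580 matches no range and is verified
 * normally.
 */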
static int
wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
struct drm_i915_private *i915 = rq->i915;
unsigned int i, count = 0;
const struct i915_wa *wa;
u32 srm, *cs;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
if (GRAPHICS_VER(i915) >= 8)
srm++;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
count++;
}
cs = intel_ring_begin(rq, 4 * count);
if (IS_ERR(cs))
return PTR_ERR(cs);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
u32 offset = i915_mmio_reg_offset(wa->reg);
if (mcr_range(i915, offset))
continue;
*cs++ = srm;
*cs++ = offset;
*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
*cs++ = 0;
}
intel_ring_advance(rq, cs);
return 0;
}
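/*
 * Note on the layout produced above (derived from the code, not from any
 * spec): each SRM takes four dwords in the ring (command, register offset,
 * destination address, upper address/zero), and the destination slot is
 * computed from the workaround's original index i.  Entries skipped because
 * of mcr_range() simply leave their result slot untouched, which keeps the
 * indices aligned with the readback loop in engine_wa_list_verify().
 */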
static int engine_wa_list_verify(struct intel_context *ce,
const struct i915_wa_list * const wal,
const char *from)
{
const struct i915_wa *wa;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_gem_ww_ctx ww;
unsigned int i;
u32 *results;
int err;
if (!wal->count)
return 0;
vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
wal->count * sizeof(u32));
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_engine_pm_get(ce->engine);
i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_gem_object_lock(vma->obj, &ww);
if (err == 0)
err = intel_context_pin_ww(ce, &ww);
if (err)
goto err_pm;
err = i915_vma_pin_ww(vma, &ww, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err)
goto err_unpin;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_vma;
}
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
if (err == 0)
err = wa_list_srm(rq, wal, vma);
i915_request_get(rq);
if (err)
i915_request_set_error_once(rq, err);
i915_request_add(rq);
if (err)
goto err_rq;
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
goto err_rq;
}
results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(results)) {
err = PTR_ERR(results);
goto err_rq;
}
err = 0;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
continue;
if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
err = -ENXIO;
}
i915_gem_object_unpin_map(vma->obj);
err_rq:
i915_request_put(rq);
err_vma:
i915_vma_unpin(vma);
err_unpin:
intel_context_unpin(ce);
err_pm:
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
intel_engine_pm_put(ce->engine);
i915_vma_put(vma);
return err;
}
int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
const char *from)
{
return engine_wa_list_verify(engine->kernel_context,
&engine->wa_list,
from);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_workarounds.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2018 Intel Corporation
*/
#include <linux/sort.h>
#include "i915_drv.h"
#include "intel_gt_requests.h"
#include "i915_selftest.h"
#include "selftest_engine_heartbeat.h"
static void reset_heartbeat(struct intel_engine_cs *engine)
{
intel_engine_set_heartbeat(engine,
engine->defaults.heartbeat_interval_ms);
}
static int timeline_sync(struct intel_timeline *tl)
{
struct dma_fence *fence;
long timeout;
fence = i915_active_fence_get(&tl->last_request);
if (!fence)
return 0;
timeout = dma_fence_wait_timeout(fence, true, HZ / 2);
dma_fence_put(fence);
if (timeout < 0)
return timeout;
return 0;
}
static int engine_sync_barrier(struct intel_engine_cs *engine)
{
return timeline_sync(engine->kernel_context->timeline);
}
struct pulse {
struct i915_active active;
struct kref kref;
};
static int pulse_active(struct i915_active *active)
{
kref_get(&container_of(active, struct pulse, active)->kref);
return 0;
}
static void pulse_free(struct kref *kref)
{
struct pulse *p = container_of(kref, typeof(*p), kref);
i915_active_fini(&p->active);
kfree(p);
}
static void pulse_put(struct pulse *p)
{
kref_put(&p->kref, pulse_free);
}
static void pulse_retire(struct i915_active *active)
{
pulse_put(container_of(active, struct pulse, active));
}
static struct pulse *pulse_create(void)
{
struct pulse *p;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return p;
kref_init(&p->kref);
i915_active_init(&p->active, pulse_active, pulse_retire, 0);
return p;
}
static void pulse_unlock_wait(struct pulse *p)
{
i915_active_unlock_wait(&p->active);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,
int (*fn)(struct intel_engine_cs *cs))
{
struct pulse *p;
int err;
GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
p = pulse_create();
if (!p)
return -ENOMEM;
err = i915_active_acquire(&p->active);
if (err)
goto out;
err = i915_active_acquire_preallocate_barrier(&p->active, engine);
if (err) {
i915_active_release(&p->active);
goto out;
}
i915_active_acquire_barrier(&p->active);
i915_active_release(&p->active);
GEM_BUG_ON(i915_active_is_idle(&p->active));
GEM_BUG_ON(llist_empty(&engine->barrier_tasks));
err = fn(engine);
if (err)
goto out;
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
if (engine_sync_barrier(engine)) {
struct drm_printer m = drm_err_printer("pulse");
pr_err("%s: no heartbeat pulse?\n", engine->name);
intel_engine_dump(engine, &m, "%s", engine->name);
err = -ETIME;
goto out;
}
GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial);
pulse_unlock_wait(p); /* synchronize with the retirement callback */
if (!i915_active_is_idle(&p->active)) {
struct drm_printer m = drm_err_printer("pulse");
pr_err("%s: heartbeat pulse did not flush idle tasks\n",
engine->name);
i915_active_print(&p->active, &m);
err = -EINVAL;
goto out;
}
out:
pulse_put(p);
return err;
}
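/*
 * Summary of the helper above (descriptive only): it attaches idle barriers
 * for the engine to a freshly created i915_active, invokes fn() (either a
 * plain barrier flush or a heartbeat pulse), and then checks that the
 * engine's barrier_tasks list drained, that the kernel timeline synced
 * within the timeout, and that the pulse's retirement callback eventually
 * marked the i915_active idle again.
 */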
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
st_engine_heartbeat_enable(engine);
if (err)
break;
}
return err;
}
static int live_idle_pulse(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
st_engine_heartbeat_disable(engine);
err = __live_idle_pulse(engine, intel_engine_pulse);
st_engine_heartbeat_enable(engine);
if (err && err != -ENODEV)
break;
err = 0;
}
return err;
}
static int cmp_u32(const void *_a, const void *_b)
{
const u32 *a = _a, *b = _b;
return *a - *b;
}
static int __live_heartbeat_fast(struct intel_engine_cs *engine)
{
const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6));
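/*
 * Rough numbers (illustrative): jiffies_to_usecs(6) is roughly
 * 6 * (1000000 / HZ), so with HZ=250 the threshold becomes 24ms, while with
 * HZ=1000 the 20ms floor from the max() applies instead.
 */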
struct intel_context *ce;
struct i915_request *rq;
ktime_t t0, t1;
u32 times[5];
int err;
int i;
ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
intel_engine_pm_get(engine);
err = intel_engine_set_heartbeat(engine, 1);
if (err)
goto err_pm;
for (i = 0; i < ARRAY_SIZE(times); i++) {
do {
/* Manufacture a tick */
intel_engine_park_heartbeat(engine);
GEM_BUG_ON(engine->heartbeat.systole);
engine->serial++; /* pretend we are not idle! */
intel_engine_unpark_heartbeat(engine);
flush_delayed_work(&engine->heartbeat.work);
if (!delayed_work_pending(&engine->heartbeat.work)) {
pr_err("%s: heartbeat %d did not start\n",
engine->name, i);
err = -EINVAL;
goto err_pm;
}
rcu_read_lock();
rq = READ_ONCE(engine->heartbeat.systole);
if (rq)
rq = i915_request_get_rcu(rq);
rcu_read_unlock();
} while (!rq);
t0 = ktime_get();
while (rq == READ_ONCE(engine->heartbeat.systole))
yield(); /* work is on the local cpu! */
t1 = ktime_get();
i915_request_put(rq);
times[i] = ktime_us_delta(t1, t0);
}
sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL);
pr_info("%s: Heartbeat delay: %uus [%u, %u]\n",
engine->name,
times[ARRAY_SIZE(times) / 2],
times[0],
times[ARRAY_SIZE(times) - 1]);
/*
* Ideally, the upper bound on min work delay would be something like
* 2 * 2 (worst), +1 for scheduling, +1 for slack. In practice, we
* are, even with system_wq_highpri, at the mercy of the CPU scheduler
* and may be stuck behind some slow work for many milliseconds,
* such as our very own display workers.
*/
if (times[ARRAY_SIZE(times) / 2] > error_threshold) {
pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n",
engine->name,
times[ARRAY_SIZE(times) / 2],
error_threshold);
err = -EINVAL;
}
reset_heartbeat(engine);
err_pm:
intel_engine_pm_put(engine);
intel_context_put(ce);
return err;
}
static int live_heartbeat_fast(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/* Check that the heartbeat ticks at the desired rate. */
if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return 0;
for_each_engine(engine, gt, id) {
err = __live_heartbeat_fast(engine);
if (err)
break;
}
return err;
}
static int __live_heartbeat_off(struct intel_engine_cs *engine)
{
int err;
intel_engine_pm_get(engine);
engine->serial++;
flush_delayed_work(&engine->heartbeat.work);
if (!delayed_work_pending(&engine->heartbeat.work)) {
pr_err("%s: heartbeat not running\n",
engine->name);
err = -EINVAL;
goto err_pm;
}
err = intel_engine_set_heartbeat(engine, 0);
if (err)
goto err_pm;
engine->serial++;
flush_delayed_work(&engine->heartbeat.work);
if (delayed_work_pending(&engine->heartbeat.work)) {
pr_err("%s: heartbeat still running\n",
engine->name);
err = -EINVAL;
goto err_beat;
}
if (READ_ONCE(engine->heartbeat.systole)) {
pr_err("%s: heartbeat still allocated\n",
engine->name);
err = -EINVAL;
goto err_beat;
}
err_beat:
reset_heartbeat(engine);
err_pm:
intel_engine_pm_put(engine);
return err;
}
static int live_heartbeat_off(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0;
/* Check that we can turn off heartbeat and not interrupt VIP */
if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return 0;
for_each_engine(engine, gt, id) {
if (!intel_engine_has_preemption(engine))
continue;
err = __live_heartbeat_off(engine);
if (err)
break;
}
return err;
}
int intel_heartbeat_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_idle_flush),
SUBTEST(live_idle_pulse),
SUBTEST(live_heartbeat_fast),
SUBTEST(live_heartbeat_off),
};
int saved_hangcheck;
int err;
if (intel_gt_is_wedged(to_gt(i915)))
return 0;
saved_hangcheck = i915->params.enable_hangcheck;
i915->params.enable_hangcheck = INT_MAX;
err = intel_gt_live_subtests(tests, to_gt(i915));
i915->params.enable_hangcheck = saved_hangcheck;
return err;
}
void st_engine_heartbeat_disable(struct intel_engine_cs *engine)
{
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
void st_engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
engine->props.heartbeat_interval_ms =
engine->defaults.heartbeat_interval_ms;
}
void st_engine_heartbeat_disable_no_pm(struct intel_engine_cs *engine)
{
engine->props.heartbeat_interval_ms = 0;
/*
* Park the heartbeat but without holding the PM lock as that
* makes the engines appear not-idle. Note that if/when unpark
* is called due to the PM lock being acquired later, the
* heartbeat still won't be enabled because heartbeat_interval_ms is still 0.
*/
if (intel_engine_pm_get_if_awake(engine)) {
intel_engine_park_heartbeat(engine);
intel_engine_pm_put(engine);
}
}
void st_engine_heartbeat_enable_no_pm(struct intel_engine_cs *engine)
{
engine->props.heartbeat_interval_ms =
engine->defaults.heartbeat_interval_ms;
}
| linux-master | drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include <linux/string_helpers.h>
#include <drm/i915_drm.h>
#include "display/intel_display.h"
#include "display/intel_display_irq.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
#define BUSY_MAX_EI 20u /* ms */
/*
* Lock protecting IPS related data structures
*/
static DEFINE_SPINLOCK(mchdev_lock);
static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
return container_of(rps, struct intel_gt, rps);
}
static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
return rps_to_gt(rps)->i915;
}
static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
return rps_to_gt(rps)->uncore;
}
static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
return &gt->uc.guc.slpc;
}
static bool rps_uses_slpc(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
return intel_uc_uses_guc_slpc(&gt->uc);
}
static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
return mask & ~rps->pm_intrmsk_mbz;
}
static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
intel_uncore_write_fw(uncore, reg, val);
}
static void rps_timer(struct timer_list *t)
{
struct intel_rps *rps = from_timer(rps, t, timer);
struct intel_gt *gt = rps_to_gt(rps);
struct intel_engine_cs *engine;
ktime_t dt, last, timestamp;
enum intel_engine_id id;
s64 max_busy[3] = {};
timestamp = 0;
for_each_engine(engine, gt, id) {
s64 busy;
int i;
dt = intel_engine_get_busy_time(engine, &timestamp);
last = engine->stats.rps;
engine->stats.rps = dt;
busy = ktime_to_ns(ktime_sub(dt, last));
for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
if (busy > max_busy[i])
swap(busy, max_busy[i]);
}
}
last = rps->pm_timestamp;
rps->pm_timestamp = timestamp;
if (intel_rps_is_active(rps)) {
s64 busy;
int i;
dt = ktime_sub(timestamp, last);
/*
* Our goal is to evaluate each engine independently, so we run
* at the lowest clocks required to sustain the heaviest
* workload. However, a task may be split into sequential
* dependent operations across a set of engines, such that
* the independent contributions do not account for high load,
* but overall the task is GPU bound. For example, consider
* video decode on vcs followed by colour post-processing
* on vecs, followed by general post-processing on rcs.
* Since multi-engines being active does imply a single
* continuous workload across all engines, we hedge our
* bets by only contributing a factor of the distributed
* load into our busyness calculation.
*/
busy = max_busy[0];
for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
if (!max_busy[i])
break;
busy += div_u64(max_busy[i], 1 << i);
}
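/*
 * Worked example of the hedged sum above (illustrative numbers): with
 * max_busy = {8ms, 4ms, 2ms} over a 10ms evaluation interval, the
 * aggregate is 8 + 4/2 + 2/4 = 10.5ms, so a pipeline spread across three
 * engines still registers as a fully loaded GPU rather than as three
 * engines at 80%, 40% and 20%.
 */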
GT_TRACE(gt,
"busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
busy, (int)div64_u64(100 * busy, dt),
max_busy[0], max_busy[1], max_busy[2],
rps->pm_interval);
if (100 * busy > rps->power.up_threshold * dt &&
rps->cur_freq < rps->max_freq_softlimit) {
rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
rps->pm_interval = 1;
queue_work(gt->i915->unordered_wq, &rps->work);
} else if (100 * busy < rps->power.down_threshold * dt &&
rps->cur_freq > rps->min_freq_softlimit) {
rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
rps->pm_interval = 1;
queue_work(gt->i915->unordered_wq, &rps->work);
} else {
rps->last_adj = 0;
}
mod_timer(&rps->timer,
jiffies + msecs_to_jiffies(rps->pm_interval));
rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
}
}
static void rps_start_timer(struct intel_rps *rps)
{
rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
rps->pm_interval = 1;
mod_timer(&rps->timer, jiffies + 1);
}
static void rps_stop_timer(struct intel_rps *rps)
{
del_timer_sync(&rps->timer);
rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
cancel_work_sync(&rps->work);
}
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
u32 mask = 0;
/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > rps->min_freq_softlimit)
mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
mask &= rps->pm_events;
return rps_pm_sanitize_mask(rps, ~mask);
}
static void rps_reset_ei(struct intel_rps *rps)
{
memset(&rps->ei, 0, sizeof(rps->ei));
}
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
GEM_BUG_ON(rps_uses_slpc(rps));
GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
rps->pm_events, rps_pm_mask(rps, rps->last_freq));
rps_reset_ei(rps);
spin_lock_irq(gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(gt->irq_lock);
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}
static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}
static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
;
}
static void rps_reset_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
spin_lock_irq(gt->irq_lock);
if (GRAPHICS_VER(gt->i915) >= 11)
gen11_rps_reset_interrupts(rps);
else
gen6_rps_reset_interrupts(rps);
rps->pm_iir = 0;
spin_unlock_irq(gt->irq_lock);
}
static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
/*
* Now that we will not be generating any more work, flush any
* outstanding tasks. As we are called on the RPS idle path,
* we will reset the GPU to minimum frequencies, so the current
* state of the worker can be discarded.
*/
cancel_work_sync(&rps->work);
rps_reset_interrupts(rps);
GT_TRACE(gt, "interrupts:off\n");
}
static const struct cparams {
u16 i;
u16 t;
u16 m;
u16 c;
} cparams[] = {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
{ 0, 1333, 276, 27605 },
{ 0, 1066, 276, 27605 },
{ 0, 800, 231, 23784 },
};
static void gen5_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fmax, fmin, fstart;
u32 rgvmodectl;
int c_m, i;
if (i915->fsb_freq <= 3200)
c_m = 0;
else if (i915->fsb_freq <= 4800)
c_m = 1;
else
c_m = 2;
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
rps->ips.m = cparams[i].m;
rps->ips.c = cparams[i].c;
break;
}
}
rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
/* Set up min, max, and cur for interrupt handling */
fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
rps->min_freq = fmax;
rps->efficient_freq = fstart;
rps->max_freq = fmin;
}
static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
struct intel_uncore *uncore =
rps_to_uncore(container_of(ips, struct intel_rps, ips));
unsigned long now = jiffies_to_msecs(jiffies), dt;
unsigned long result;
u64 total, delta;
lockdep_assert_held(&mchdev_lock);
/*
* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
* faster than once in 10ms, so just return the saved value
* in such cases.
*/
dt = now - ips->last_time1;
if (dt <= 10)
return ips->chipset_power;
/* FIXME: handle per-counter overflow */
total = intel_uncore_read(uncore, DMIEC);
total += intel_uncore_read(uncore, DDREC);
total += intel_uncore_read(uncore, CSIEC);
delta = total - ips->last_count1;
result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);
ips->last_count1 = total;
ips->last_time1 = now;
ips->chipset_power = result;
return result;
}
static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
unsigned int m, x, b;
u32 tsfs;
tsfs = intel_uncore_read(uncore, TSFS);
x = intel_uncore_read8(uncore, TR1);
b = tsfs & TSFS_INTR_MASK;
m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;
return m * x / 127 - b;
}
static int _pxvid_to_vd(u8 pxvid)
{
if (pxvid == 0)
return 0;
if (pxvid >= 8 && pxvid < 31)
pxvid = 31;
return (pxvid + 2) * 125;
}
static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
const int vd = _pxvid_to_vd(pxvid);
if (INTEL_INFO(i915)->is_mobile)
return max(vd - 1125, 0);
return vd;
}
static void __gen5_ips_update(struct intel_ips *ips)
{
struct intel_uncore *uncore =
rps_to_uncore(container_of(ips, struct intel_rps, ips));
u64 now, delta, dt;
u32 count;
lockdep_assert_held(&mchdev_lock);
now = ktime_get_raw_ns();
dt = now - ips->last_time2;
do_div(dt, NSEC_PER_MSEC);
/* Don't divide by 0 */
if (dt <= 10)
return;
count = intel_uncore_read(uncore, GFXEC);
delta = count - ips->last_count2;
ips->last_count2 = count;
ips->last_time2 = now;
/* More magic constants... */
ips->gfx_power = div_u64(delta * 1181, dt * 10);
}
static void gen5_rps_update(struct intel_rps *rps)
{
spin_lock_irq(&mchdev_lock);
__gen5_ips_update(&rps->ips);
spin_unlock_irq(&mchdev_lock);
}
static unsigned int gen5_invert_freq(struct intel_rps *rps,
unsigned int val)
{
/* Invert the frequency bin into an ips delay */
val = rps->max_freq - val;
val = rps->min_freq + val;
return val;
}
static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
lockdep_assert_held(&mchdev_lock);
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
drm_dbg(&rps_to_i915(rps)->drm,
"gpu busy, RCS change rejected\n");
return -EBUSY; /* still busy with another command */
}
/* Invert the frequency bin into an ips delay */
val = gen5_invert_freq(rps, val);
rgvswctl =
(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
(val << MEMCTL_FREQ_SHIFT) |
MEMCTL_SFCAVM;
intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
intel_uncore_posting_read16(uncore, MEMSWCTL);
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
return 0;
}
static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
int err;
spin_lock_irq(&mchdev_lock);
err = __gen5_rps_set(rps, val);
spin_unlock_irq(&mchdev_lock);
return err;
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
int div = (vidfreq & 0x3f0000) >> 16;
int post = (vidfreq & 0x3000) >> 12;
int pre = (vidfreq & 0x7);
if (!pre)
return 0;
return div * 133333 / (pre << post);
}
static unsigned int init_emon(struct intel_uncore *uncore)
{
u8 pxw[16];
int i;
/* Disable to program */
intel_uncore_write(uncore, ECR, 0);
intel_uncore_posting_read(uncore, ECR);
/* Program energy weights for various events */
intel_uncore_write(uncore, SDEW, 0x15040d00);
intel_uncore_write(uncore, CSIEW0, 0x007f0000);
intel_uncore_write(uncore, CSIEW1, 0x1e220004);
intel_uncore_write(uncore, CSIEW2, 0x04000004);
for (i = 0; i < 5; i++)
intel_uncore_write(uncore, PEW(i), 0);
for (i = 0; i < 3; i++)
intel_uncore_write(uncore, DEW(i), 0);
/* Program P-state weights to account for frequency power adjustment */
for (i = 0; i < 16; i++) {
u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
unsigned int freq = intel_pxfreq(pxvidfreq);
unsigned int vid =
(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
unsigned int val;
val = vid * vid * freq / 1000 * 255;
val /= 127 * 127 * 900;
pxw[i] = val;
}
/* Render standby states get 0 weight */
pxw[14] = 0;
pxw[15] = 0;
for (i = 0; i < 4; i++) {
intel_uncore_write(uncore, PXW(i),
pxw[i * 4 + 0] << 24 |
pxw[i * 4 + 1] << 16 |
pxw[i * 4 + 2] << 8 |
pxw[i * 4 + 3] << 0);
}
/* Adjust magic regs to magic values (more experimental results) */
intel_uncore_write(uncore, OGW0, 0);
intel_uncore_write(uncore, OGW1, 0);
intel_uncore_write(uncore, EG0, 0x00007f00);
intel_uncore_write(uncore, EG1, 0x0000000e);
intel_uncore_write(uncore, EG2, 0x000e0000);
intel_uncore_write(uncore, EG3, 0x68000300);
intel_uncore_write(uncore, EG4, 0x42000000);
intel_uncore_write(uncore, EG5, 0x00140031);
intel_uncore_write(uncore, EG6, 0);
intel_uncore_write(uncore, EG7, 0);
for (i = 0; i < 8; i++)
intel_uncore_write(uncore, PXWL(i), 0);
/* Enable PMON + select events */
intel_uncore_write(uncore, ECR, 0x80000019);
return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}
static bool gen5_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
u8 fstart, vstart;
u32 rgvmodectl;
spin_lock_irq(&mchdev_lock);
rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
/* Enable temp reporting */
intel_uncore_write16(uncore, PMMISC,
intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
intel_uncore_write16(uncore, TSC1,
intel_uncore_read16(uncore, TSC1) | TSE);
/* 100ms RC evaluation intervals */
intel_uncore_write(uncore, RCUPEI, 100000);
intel_uncore_write(uncore, RCDNEI, 100000);
/* Set max/min thresholds to 90ms and 80ms respectively */
intel_uncore_write(uncore, RCBMAXAVG, 90000);
intel_uncore_write(uncore, RCBMINAVG, 80000);
intel_uncore_write(uncore, MEMIHYST, 1);
/* Set up min, max, and cur for interrupt handling */
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
intel_uncore_write(uncore,
MEMINTREN,
MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
intel_uncore_write(uncore, VIDSTART, vstart);
intel_uncore_posting_read(uncore, VIDSTART);
rgvmodectl |= MEMMODE_SWMODE_EN;
intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);
if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
MEMCTL_CMD_STS) == 0, 10))
drm_err(&uncore->i915->drm,
"stuck trying to change perf mode\n");
mdelay(1);
__gen5_rps_set(rps, rps->cur_freq);
rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
rps->ips.last_time1 = jiffies_to_msecs(jiffies);
rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
rps->ips.last_time2 = ktime_get_raw_ns();
spin_lock(&i915->irq_lock);
ilk_enable_display_irq(i915, DE_PCU_EVENT);
spin_unlock(&i915->irq_lock);
spin_unlock_irq(&mchdev_lock);
rps->ips.corr = init_emon(uncore);
return true;
}
static void gen5_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
spin_lock(&i915->irq_lock);
ilk_disable_display_irq(i915, DE_PCU_EVENT);
spin_unlock(&i915->irq_lock);
rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
intel_uncore_rmw(uncore, MEMINTREN, MEMINT_EVAL_CHG_EN, 0);
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
/* Go back to the starting frequency */
__gen5_rps_set(rps, rps->idle_freq);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
mdelay(1);
spin_unlock_irq(&mchdev_lock);
}
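/*
 * Editor's note (descriptive): pack the softlimits into the
 * GEN6_RP_INTERRUPT_LIMITS layout. The field positions moved on Gen9+
 * (max at bit 23, min at bit 14) compared with earlier parts (bits 24 and
 * 16); see the comment below for why the down limit is only armed once the
 * lowest frequency has been reached.
 */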
static u32 rps_limits(struct intel_rps *rps, u8 val)
{
u32 limits;
/*
* Only set the down limit when we've reached the lowest level to avoid
* getting more interrupts, otherwise leave this clear. This prevents a
* race in the hw when coming out of rc6: There's a tiny window where
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt.
*/
if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
limits = rps->max_freq_softlimit << 23;
if (val <= rps->min_freq_softlimit)
limits |= rps->min_freq_softlimit << 14;
} else {
limits = rps->max_freq_softlimit << 24;
if (val <= rps->min_freq_softlimit)
limits |= rps->min_freq_softlimit << 16;
}
return limits;
}
static void rps_set_power(struct intel_rps *rps, int new_power)
{
struct intel_gt *gt = rps_to_gt(rps);
struct intel_uncore *uncore = gt->uncore;
u32 ei_up = 0, ei_down = 0;
lockdep_assert_held(&rps->power.mutex);
if (new_power == rps->power.mode)
return;
/* Note the units here are not exactly 1us, but 1280ns. */
switch (new_power) {
case LOW_POWER:
ei_up = 16000;
ei_down = 32000;
break;
case BETWEEN:
ei_up = 13000;
ei_down = 32000;
break;
case HIGH_POWER:
ei_up = 10000;
ei_down = 32000;
break;
}
/* Once BYT can survive dynamic SW frequency adjustments without a
* system hang, this restriction can be lifted.
*/
if (IS_VALLEYVIEW(gt->i915))
goto skip_hw_write;
GT_TRACE(gt,
"changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
new_power,
rps->power.up_threshold, ei_up,
rps->power.down_threshold, ei_down);
set(uncore, GEN6_RP_UP_EI,
intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
set(uncore, GEN6_RP_UP_THRESHOLD,
intel_gt_ns_to_pm_interval(gt,
ei_up * rps->power.up_threshold * 10));
set(uncore, GEN6_RP_DOWN_EI,
intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
set(uncore, GEN6_RP_DOWN_THRESHOLD,
intel_gt_ns_to_pm_interval(gt,
ei_down *
rps->power.down_threshold * 10));
set(uncore, GEN6_RP_CONTROL,
(GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
skip_hw_write:
rps->power.mode = new_power;
}
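/*
 * Editor's note (descriptive): pick the up/down evaluation-interval profile
 * (LOW_POWER, BETWEEN, HIGH_POWER) from the requested frequency, with
 * hysteresis around the efficient (RPe) and RP0 points. The softlimit
 * extremes and the "interactive" flag override the heuristic.
 */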
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
int new_power;
new_power = rps->power.mode;
switch (rps->power.mode) {
case LOW_POWER:
if (val > rps->efficient_freq + 1 &&
val > rps->cur_freq)
new_power = BETWEEN;
break;
case BETWEEN:
if (val <= rps->efficient_freq &&
val < rps->cur_freq)
new_power = LOW_POWER;
else if (val >= rps->rp0_freq &&
val > rps->cur_freq)
new_power = HIGH_POWER;
break;
case HIGH_POWER:
if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
val < rps->cur_freq)
new_power = BETWEEN;
break;
}
/* Max/min bins are special */
if (val <= rps->min_freq_softlimit)
new_power = LOW_POWER;
if (val >= rps->max_freq_softlimit)
new_power = HIGH_POWER;
mutex_lock(&rps->power.mutex);
if (rps->power.interactive)
new_power = HIGH_POWER;
rps_set_power(rps, new_power);
mutex_unlock(&rps->power.mutex);
}
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n",
str_yes_no(interactive));
mutex_lock(&rps->power.mutex);
if (interactive) {
if (!rps->power.interactive++ && intel_rps_is_active(rps))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
rps->power.interactive--;
}
mutex_unlock(&rps->power.mutex);
}
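/*
 * Editor's note (descriptive): encode a host-driven frequency request into
 * GEN6_RPNSWREQ. The ratio field sits at a different bit position per
 * generation (bit 23 on Gen9+, bit 24 on HSW/BDW, bit 25 on earlier Gen6+
 * parts), matching the decode in rps_frequency_dump() further down.
 */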
static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 swreq;
GEM_BUG_ON(rps_uses_slpc(rps));
if (GRAPHICS_VER(i915) >= 9)
swreq = GEN9_FREQUENCY(val);
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
swreq = HSW_FREQUENCY(val);
else
swreq = (GEN6_FREQUENCY(val) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
set(uncore, GEN6_RPNSWREQ, swreq);
GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
val, intel_gpu_freq(rps, val), swreq);
return 0;
}
static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
vlv_punit_get(i915);
err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
vlv_punit_put(i915);
GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
val, intel_gpu_freq(rps, val));
return err;
}
static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
int err;
if (val == rps->last_freq)
return 0;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
err = vlv_rps_set(rps, val);
else if (GRAPHICS_VER(i915) >= 6)
err = gen6_rps_set(rps, val);
else
err = gen5_rps_set(rps, val);
if (err)
return err;
if (update && GRAPHICS_VER(i915) >= 6)
gen6_rps_set_thresholds(rps, val);
rps->last_freq = val;
return 0;
}
void intel_rps_unpark(struct intel_rps *rps)
{
if (!intel_rps_is_enabled(rps))
return;
GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);
/*
* Use the user's desired frequency as a guide, but for better
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
intel_rps_set_active(rps);
intel_rps_set(rps,
clamp(rps->cur_freq,
rps->min_freq_softlimit,
rps->max_freq_softlimit));
mutex_unlock(&rps->lock);
rps->pm_iir = 0;
if (intel_rps_has_interrupts(rps))
rps_enable_interrupts(rps);
if (intel_rps_uses_timer(rps))
rps_start_timer(rps);
if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
gen5_rps_update(rps);
}
void intel_rps_park(struct intel_rps *rps)
{
int adj;
if (!intel_rps_is_enabled(rps))
return;
if (!intel_rps_clear_active(rps))
return;
if (intel_rps_uses_timer(rps))
rps_stop_timer(rps);
if (intel_rps_has_interrupts(rps))
rps_disable_interrupts(rps);
if (rps->last_freq <= rps->idle_freq)
return;
/*
* The punit delays the write of the frequency and voltage until it
* determines the GPU is awake. During normal usage we don't want to
* waste power changing the frequency if the GPU is sleeping (rc6).
* However, the GPU and driver are now idle and we do not want to delay
* switching to minimum voltage (reducing power whilst idle) as we do
* not expect to be woken in the near future and so must flush the
* change by waking the device.
*
* We choose to take the media powerwell (either would do to trick the
* punit into committing the voltage change) as that takes a lot less
* power than the render powerwell.
*/
intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
rps_set(rps, rps->idle_freq, false);
intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);
/*
* Since we will try and restart from the previously requested
* frequency on unparking, treat this idle point as a downclock
* interrupt and reduce the frequency for resume. If we park/unpark
* more frequently than the rps worker can run, we will not respond
* to any EI and never see a change in frequency.
*
* (Note we accommodate Cherryview's limitation of only using an
* even bin by applying it to all.)
*/
adj = rps->last_adj;
if (adj < 0)
adj *= 2;
else /* CHV needs even encode values */
adj = -2;
rps->last_adj = adj;
rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
if (rps->cur_freq < rps->efficient_freq) {
rps->cur_freq = rps->efficient_freq;
rps->last_adj = 0;
}
GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}
u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc;
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
return slpc->boost_freq;
} else {
return intel_gpu_freq(rps, rps->boost_freq);
}
}
static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
{
bool boost = false;
/* Validate against (static) hardware limits */
val = intel_freq_opcode(rps, val);
if (val < rps->min_freq || val > rps->max_freq)
return -EINVAL;
mutex_lock(&rps->lock);
if (val != rps->boost_freq) {
rps->boost_freq = val;
boost = atomic_read(&rps->num_waiters);
}
mutex_unlock(&rps->lock);
if (boost)
queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
return 0;
}
int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
{
struct intel_guc_slpc *slpc;
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
return intel_guc_slpc_set_boost_freq(slpc, freq);
} else {
return rps_set_boost_freq(rps, freq);
}
}
void intel_rps_dec_waiters(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc;
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
intel_guc_slpc_dec_waiters(slpc);
} else {
atomic_dec(&rps->num_waiters);
}
}
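/*
 * Editor's note (descriptive): boost the GPU frequency on behalf of a waiting
 * request. The boost is applied at most once per request
 * (I915_FENCE_FLAG_BOOST), and only the first waiter queues the boost work,
 * on both the SLPC and host-RPS paths.
 */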
void intel_rps_boost(struct i915_request *rq)
{
struct intel_guc_slpc *slpc;
if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
return;
/* Serializes with i915_request_retire() */
if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
if (rps_uses_slpc(rps)) {
slpc = rps_to_slpc(rps);
if (slpc->min_freq_softlimit >= slpc->boost_freq)
return;
/* Only the first waiter (old count was zero) queues the boost work */
if (!atomic_fetch_inc(&slpc->num_waiters)) {
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
queue_work(rps_to_gt(rps)->i915->unordered_wq,
&slpc->boost_work);
}
return;
}
if (atomic_fetch_inc(&rps->num_waiters))
return;
if (!intel_rps_is_active(rps))
return;
GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
rq->fence.context, rq->fence.seqno);
if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
}
}
int intel_rps_set(struct intel_rps *rps, u8 val)
{
int err;
lockdep_assert_held(&rps->lock);
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
if (intel_rps_is_active(rps)) {
err = rps_set(rps, val, true);
if (err)
return err;
/*
* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
if (intel_rps_has_interrupts(rps)) {
struct intel_uncore *uncore = rps_to_uncore(rps);
set(uncore,
GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));
set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
}
}
rps->cur_freq = val;
return 0;
}
static u32 intel_rps_read_state_cap(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
if (IS_PONTEVECCHIO(i915))
return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
else if (IS_XEHPSDV(i915))
return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
else if (IS_GEN9_LP(i915))
return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
else
return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
}
static void
mtl_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u32 rp_state_cap = rps_to_gt(rps)->type == GT_MEDIA ?
intel_uncore_read(uncore, MTL_MEDIAP_STATE_CAP) :
intel_uncore_read(uncore, MTL_RP_STATE_CAP);
u32 rpe = rps_to_gt(rps)->type == GT_MEDIA ?
intel_uncore_read(uncore, MTL_MPE_FREQUENCY) :
intel_uncore_read(uncore, MTL_GT_RPE_FREQUENCY);
/* MTL values are in units of 16.67 MHz */
caps->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, rp_state_cap);
caps->min_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, rp_state_cap);
caps->rp1_freq = REG_FIELD_GET(MTL_RPE_MASK, rpe);
}
static void
__gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 rp_state_cap;
rp_state_cap = intel_rps_read_state_cap(rps);
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_GEN9_LP(i915)) {
caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
caps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
if (GRAPHICS_VER(i915) >= 10)
caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
intel_uncore_read(to_gt(i915)->uncore,
GEN10_FREQ_INFO_REC));
else
caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
caps->min_freq = (rp_state_cap >> 16) & 0xff;
}
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
/*
* In this case rp_state_cap register reports frequencies in
* units of 50 MHz. Convert these to the actual "hw unit", i.e.
* units of 16.67 MHz
*/
caps->rp0_freq *= GEN9_FREQ_SCALER;
caps->rp1_freq *= GEN9_FREQ_SCALER;
caps->min_freq *= GEN9_FREQ_SCALER;
}
}
/**
* gen6_rps_get_freq_caps - Get freq caps exposed by HW
* @rps: the intel_rps structure
* @caps: returned freq caps
*
* Returned "caps" frequencies should be converted to MHz using
* intel_gpu_freq()
*/
void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
if (IS_METEORLAKE(i915))
return mtl_get_freq_caps(rps, caps);
else
return __gen6_rps_get_freq_caps(rps, caps);
}
static void gen6_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_rps_freq_caps caps;
gen6_rps_get_freq_caps(rps, &caps);
rps->rp0_freq = caps.rp0_freq;
rps->rp1_freq = caps.rp1_freq;
rps->min_freq = caps.min_freq;
/* hw_max = RP0 until we check for overclocking */
rps->max_freq = rps->rp0_freq;
rps->efficient_freq = rps->rp1_freq;
if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
u32 ddcc_status = 0;
u32 mult = 1;
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
mult = GEN9_FREQ_SCALER;
if (snb_pcode_read(rps_to_gt(rps)->uncore,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status, NULL) == 0)
rps->efficient_freq =
clamp_t(u32,
((ddcc_status >> 8) & 0xff) * mult,
rps->min_freq,
rps->max_freq);
}
}
static bool rps_reset(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
/* force a reset */
rps->power.mode = -1;
rps->last_freq = -1;
if (rps_set(rps, rps->min_freq, true)) {
drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
return false;
}
rps->cur_freq = rps->min_freq;
return true;
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
struct intel_uncore *uncore = gt->uncore;
/* Program defaults and thresholds for RPS */
if (GRAPHICS_VER(gt->i915) == 9)
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(rps->rp1_freq));
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);
rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
return rps_reset(rps);
}
static bool gen8_rps_enable(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
HSW_FREQUENCY(rps->rp1_freq));
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;
return rps_reset(rps);
}
static bool gen6_rps_enable(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
/* Power down if completely idle for over 50ms */
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
return rps_reset(rps);
}
static int chv_rps_max_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_gt *gt = rps_to_gt(rps);
u32 val;
val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
switch (gt->info.sseu.eu_total) {
case 8:
/* (2 * 4) config */
val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
break;
case 12:
/* (2 * 6) config */
val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
break;
case 16:
/* (2 * 8) config */
default:
/* Setting (2 * 8) Min RP0 for any other combination */
val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
break;
}
return val & FB_GFX_FREQ_FUSE_MASK;
}
static int chv_rps_rpe_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;
return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}
static int chv_rps_guar_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);
return val & FB_GFX_FREQ_FUSE_MASK;
}
static u32 chv_rps_min_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;
return val & FB_GFX_FREQ_FUSE_MASK;
}
static bool chv_rps_enable(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
/* 1: Program defaults and thresholds for RPS*/
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
/* 2: Enable RPS */
intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
/* Setting Fixed Bias */
vlv_punit_get(i915);
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
drm_dbg(&i915->drm, "GPLL enabled? %s\n",
str_yes_no(val & GPLLENABLE));
drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
static int vlv_rps_guar_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp1;
val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
return rp1;
}
static int vlv_rps_max_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rp0;
val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);
rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
/* Clamp to max */
rp0 = min_t(u32, rp0, 0xea);
return rp0;
}
static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val, rpe;
val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
return rpe;
}
static int vlv_rps_min_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
/*
* According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
* for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
* a BYT-M B0 the above register contains 0xbf. Moreover, when setting
* a frequency, the Punit will not allow values below 0xc0. Clamp the value
* to 0xc0 to make sure it matches what the Punit accepts.
*/
return max_t(u32, val, 0xc0);
}
static bool vlv_rps_enable(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 val;
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);
intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);
intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
/* WaGsvRC0ResidencyMethod:vlv */
rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
vlv_punit_get(i915);
/* Setting Fixed Bias */
val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);
val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
"GPLL not enabled\n");
drm_dbg(&i915->drm, "GPLL enabled? %s\n",
str_yes_no(val & GPLLENABLE));
drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);
return rps_reset(rps);
}
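/*
 * Editor's note (descriptive): estimate the instantaneous graphics power
 * draw (in mW) for IPS by combining the fused voltage for the current
 * P-state with a temperature-dependent correction and the calibration value
 * returned by init_emon(), then adding the running gfx_power average kept by
 * __gen5_ips_update().
 */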
static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
struct intel_uncore *uncore = rps_to_uncore(rps);
unsigned int t, state1, state2;
u32 pxvid, ext_v;
u64 corr, corr2;
lockdep_assert_held(&mchdev_lock);
pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);
state1 = ext_v;
/* Revel in the empirically derived constants */
/* Correction factor in 1/100000 units */
t = ips_mch_val(uncore);
if (t > 80)
corr = t * 2349 + 135940;
else if (t >= 50)
corr = t * 964 + 29317;
else /* < 50 */
corr = t * 301 + 1004;
corr = div_u64(corr * 150142 * state1, 10000) - 78642;
corr2 = div_u64(corr, 100000) * ips->corr;
state2 = div_u64(corr2 * state1, 10000);
state2 /= 100; /* convert to mW */
__gen5_ips_update(ips);
return ips->gfx_power + state2;
}
static bool has_busy_stats(struct intel_rps *rps)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, rps_to_gt(rps), id) {
if (!intel_engine_supports_stats(engine))
return false;
}
return true;
}
void intel_rps_enable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
bool enabled = false;
if (!HAS_RPS(i915))
return;
if (rps_uses_slpc(rps))
return;
intel_gt_check_clock_frequency(rps_to_gt(rps));
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
if (rps->max_freq <= rps->min_freq)
/* leave disabled, no room for dynamic reclocking */;
else if (IS_CHERRYVIEW(i915))
enabled = chv_rps_enable(rps);
else if (IS_VALLEYVIEW(i915))
enabled = vlv_rps_enable(rps);
else if (GRAPHICS_VER(i915) >= 9)
enabled = gen9_rps_enable(rps);
else if (GRAPHICS_VER(i915) >= 8)
enabled = gen8_rps_enable(rps);
else if (GRAPHICS_VER(i915) >= 6)
enabled = gen6_rps_enable(rps);
else if (IS_IRONLAKE_M(i915))
enabled = gen5_rps_enable(rps);
else
MISSING_CASE(GRAPHICS_VER(i915));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
if (!enabled)
return;
GT_TRACE(rps_to_gt(rps),
"min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n",
rps->min_freq, rps->max_freq,
intel_gpu_freq(rps, rps->min_freq),
intel_gpu_freq(rps, rps->max_freq),
rps->power.up_threshold,
rps->power.down_threshold);
GEM_BUG_ON(rps->max_freq < rps->min_freq);
GEM_BUG_ON(rps->idle_freq > rps->max_freq);
GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
GEM_BUG_ON(rps->efficient_freq > rps->max_freq);
if (has_busy_stats(rps))
intel_rps_set_timer(rps);
else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11)
intel_rps_set_interrupts(rps);
else
/* Ironlake currently uses intel_ips.ko */ {}
intel_rps_set_enabled(rps);
}
static void gen6_rps_disable(struct intel_rps *rps)
{
set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}
void intel_rps_disable(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
if (!intel_rps_is_enabled(rps))
return;
intel_rps_clear_enabled(rps);
intel_rps_clear_interrupts(rps);
intel_rps_clear_timer(rps);
if (GRAPHICS_VER(i915) >= 6)
gen6_rps_disable(rps);
else if (IS_IRONLAKE_M(i915))
gen5_rps_disable(rps);
}
static int byt_gpu_freq(struct intel_rps *rps, int val)
{
/*
* N = val - 0xb7
* Slow = Fast = GPLL ref * N
*/
return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}
static int byt_freq_opcode(struct intel_rps *rps, int val)
{
return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}
static int chv_gpu_freq(struct intel_rps *rps, int val)
{
/*
* N = val / 2
* CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
*/
return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}
static int chv_freq_opcode(struct intel_rps *rps, int val)
{
/* CHV needs even values */
return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}
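/*
 * Editor's note (illustrative example only, assuming GT_FREQUENCY_MULTIPLIER
 * is 50 and GEN9_FREQ_SCALER is 3 as defined elsewhere in the driver): on a
 * Gen9+ part, intel_gpu_freq(rps, 24) == DIV_ROUND_CLOSEST(24 * 50, 3) ==
 * 400 MHz, and intel_freq_opcode(rps, 400) == 24. VLV/CHV instead derive the
 * conversion from the GPLL reference clock via the helpers above.
 */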
int intel_gpu_freq(struct intel_rps *rps, int val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
if (GRAPHICS_VER(i915) >= 9)
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
else if (IS_CHERRYVIEW(i915))
return chv_gpu_freq(rps, val);
else if (IS_VALLEYVIEW(i915))
return byt_gpu_freq(rps, val);
else if (GRAPHICS_VER(i915) >= 6)
return val * GT_FREQUENCY_MULTIPLIER;
else
return val;
}
int intel_freq_opcode(struct intel_rps *rps, int val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
if (GRAPHICS_VER(i915) >= 9)
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
else if (IS_CHERRYVIEW(i915))
return chv_freq_opcode(rps, val);
else if (IS_VALLEYVIEW(i915))
return byt_freq_opcode(rps, val);
else if (GRAPHICS_VER(i915) >= 6)
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
else
return val;
}
static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
rps->gpll_ref_freq =
vlv_get_cck_clock(i915, "GPLL ref",
CCK_GPLL_CLOCK_CONTROL,
i915->czclk_freq);
drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
rps->gpll_ref_freq);
}
static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
vlv_init_gpll_ref_freq(rps);
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = vlv_rps_rpe_freq(rps);
drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = vlv_rps_guar_freq(rps);
drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = vlv_rps_min_freq(rps);
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
}
static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
vlv_iosf_sb_get(i915,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
vlv_init_gpll_ref_freq(rps);
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
rps->efficient_freq = chv_rps_rpe_freq(rps);
drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
rps->rp1_freq = chv_rps_guar_freq(rps);
drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
rps->min_freq = chv_rps_min_freq(rps);
drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
vlv_iosf_sb_put(i915,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
rps->rp1_freq | rps->min_freq) & 1,
"Odd GPU freq values\n");
}
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
ei->ktime = ktime_get_raw();
ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}
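/*
 * Editor's note (descriptive): WaGsvRC0ResidencyMethod - VLV/CHV raise
 * GEN6_PM_RP_UP_EI_EXPIRED instead of the usual up/down threshold
 * interrupts, so synthesize those events here by comparing the busier of the
 * render/media C0 residency counters against the elapsed time and the
 * configured up/down thresholds.
 */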
static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
const struct intel_rps_ei *prev = &rps->ei;
struct intel_rps_ei now;
u32 events = 0;
if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(uncore, &now);
if (prev->ktime) {
u64 time, c0;
u32 render, media;
time = ktime_us_delta(now.ktime, prev->ktime);
time *= rps_to_i915(rps)->czclk_freq;
/* Workload can be split between render + media,
* e.g. SwapBuffers being blitted in X after being rendered in
* mesa. To account for this we need to combine both engines
* into our activity counter.
*/
render = now.render_c0 - prev->render_c0;
media = now.media_c0 - prev->media_c0;
c0 = max(render, media);
c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
if (c0 > time * rps->power.up_threshold)
events = GEN6_PM_RP_UP_THRESHOLD;
else if (c0 < time * rps->power.down_threshold)
events = GEN6_PM_RP_DOWN_THRESHOLD;
}
rps->ei = now;
return events;
}
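/*
 * Editor's note (descriptive): deferred RPS worker - consume the masked PM
 * events (and any client boost), pick the next frequency with an adaptive
 * step (doubling on repeated up/down events, even steps on CHV), clamp it to
 * the allowed range and apply it, then unmask the PM interrupts again.
 */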
static void rps_work(struct work_struct *work)
{
struct intel_rps *rps = container_of(work, typeof(*rps), work);
struct intel_gt *gt = rps_to_gt(rps);
struct drm_i915_private *i915 = rps_to_i915(rps);
bool client_boost = false;
int new_freq, adj, min, max;
u32 pm_iir = 0;
spin_lock_irq(gt->irq_lock);
pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
if (!pm_iir && !client_boost)
goto out;
mutex_lock(&rps->lock);
if (!intel_rps_is_active(rps)) {
mutex_unlock(&rps->lock);
return;
}
pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
adj = rps->last_adj;
new_freq = rps->cur_freq;
min = rps->min_freq_softlimit;
max = rps->max_freq_softlimit;
if (client_boost)
max = rps->max_freq;
GT_TRACE(gt,
"pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
pm_iir, str_yes_no(client_boost),
adj, new_freq, min, max);
if (client_boost && new_freq < rps->boost_freq) {
new_freq = rps->boost_freq;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
if (new_freq >= rps->max_freq_softlimit)
adj = 0;
} else if (client_boost) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (rps->cur_freq > rps->efficient_freq)
new_freq = rps->efficient_freq;
else if (rps->cur_freq > rps->min_freq_softlimit)
new_freq = rps->min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
if (new_freq <= rps->min_freq_softlimit)
adj = 0;
} else { /* unknown event */
adj = 0;
}
/*
* sysfs frequency limits may have snuck in while
* servicing the interrupt
*/
new_freq += adj;
new_freq = clamp_t(int, new_freq, min, max);
if (intel_rps_set(rps, new_freq)) {
drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
adj = 0;
}
rps->last_adj = adj;
mutex_unlock(&rps->lock);
out:
spin_lock_irq(gt->irq_lock);
gen6_gt_pm_unmask_irq(gt, rps->pm_events);
spin_unlock_irq(gt->irq_lock);
}
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
const u32 events = rps->pm_events & pm_iir;
lockdep_assert_held(gt->irq_lock);
if (unlikely(!events))
return;
GT_TRACE(gt, "irq events:%x\n", events);
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
queue_work(gt->i915->unordered_wq, &rps->work);
}
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
u32 events;
events = pm_iir & rps->pm_events;
if (events) {
spin_lock(gt->irq_lock);
GT_TRACE(gt, "irq events:%x\n", events);
gen6_gt_pm_mask_irq(gt, events);
rps->pm_iir |= events;
queue_work(gt->i915->unordered_wq, &rps->work);
spin_unlock(gt->irq_lock);
}
if (GRAPHICS_VER(gt->i915) >= 8)
return;
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
drm_dbg(&rps_to_i915(rps)->drm,
"Command parser error, pm_iir 0x%08x\n", pm_iir);
}
void gen5_rps_irq_handler(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_freq;
spin_lock(&mchdev_lock);
intel_uncore_write16(uncore,
MEMINTRSTS,
intel_uncore_read(uncore, MEMINTRSTS));
intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
max_avg = intel_uncore_read(uncore, RCBMAXAVG);
min_avg = intel_uncore_read(uncore, RCBMINAVG);
/* Handle RCS change request from hw */
new_freq = rps->cur_freq;
if (busy_up > max_avg)
new_freq++;
else if (busy_down < min_avg)
new_freq--;
new_freq = clamp(new_freq,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
rps->cur_freq = new_freq;
spin_unlock(&mchdev_lock);
}
void intel_rps_init_early(struct intel_rps *rps)
{
mutex_init(&rps->lock);
mutex_init(&rps->power.mutex);
INIT_WORK(&rps->work, rps_work);
timer_setup(&rps->timer, rps_timer, 0);
atomic_set(&rps->num_waiters, 0);
}
void intel_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
if (rps_uses_slpc(rps))
return;
if (IS_CHERRYVIEW(i915))
chv_rps_init(rps);
else if (IS_VALLEYVIEW(i915))
vlv_rps_init(rps);
else if (GRAPHICS_VER(i915) >= 6)
gen6_rps_init(rps);
else if (IS_IRONLAKE_M(i915))
gen5_rps_init(rps);
/* Derive initial user preferences/limits from the hardware limits */
rps->max_freq_softlimit = rps->max_freq;
rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
rps->min_freq_softlimit = rps->min_freq;
rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;
/* After setting max-softlimit, find the overclock max freq */
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
u32 params = 0;
snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL);
if (params & BIT(31)) { /* OC supported */
drm_dbg(&i915->drm,
"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
(rps->max_freq & 0xff) * 50,
(params & 0xff) * 50);
rps->max_freq = params & 0xff;
}
}
/* Set default thresholds in % */
rps->power.up_threshold = 95;
rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold;
rps->power.down_threshold = 85;
rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold;
/* Finally allow us to boost to max by default */
rps->boost_freq = rps->max_freq;
rps->idle_freq = rps->min_freq;
/* Start in the middle, from here we will autotune based on workload */
rps->cur_freq = rps->efficient_freq;
rps->pm_intrmsk_mbz = 0;
/*
* SNB, IVB and HSW can hang, and VLV and CHV may hard hang, on a looping
* batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
*
* TODO: verify if this can be reproduced on VLV,CHV.
*/
if (GRAPHICS_VER(i915) <= 7)
rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
/* GuC needs ARAT expired interrupt unmasked */
if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
}
void intel_rps_sanitize(struct intel_rps *rps)
{
if (rps_uses_slpc(rps))
return;
if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
rps_disable_interrupts(rps);
}
u32 intel_rps_read_rpstat(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
i915_reg_t rpstat;
rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;
return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
}
static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
u32 cagf;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
cagf = REG_FIELD_GET(MTL_CAGF_MASK, rpstat);
else if (GRAPHICS_VER(i915) >= 12)
cagf = REG_FIELD_GET(GEN12_CAGF_MASK, rpstat);
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
cagf = REG_FIELD_GET(RPE_MASK, rpstat);
else if (GRAPHICS_VER(i915) >= 9)
cagf = REG_FIELD_GET(GEN9_CAGF_MASK, rpstat);
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
cagf = REG_FIELD_GET(HSW_CAGF_MASK, rpstat);
else if (GRAPHICS_VER(i915) >= 6)
cagf = REG_FIELD_GET(GEN6_CAGF_MASK, rpstat);
else
cagf = gen5_invert_freq(rps, REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rpstat));
return cagf;
}
static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
i915_reg_t r = INVALID_MMIO_REG;
u32 freq;
/*
* For Gen12+ reading freq from HW does not need a forcewake and
* registers will return 0 freq when GT is in RC6
*/
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
r = MTL_MIRROR_TARGET_WP1;
} else if (GRAPHICS_VER(i915) >= 12) {
r = GEN12_RPSTAT1;
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
vlv_punit_get(i915);
freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
} else if (GRAPHICS_VER(i915) >= 6) {
r = GEN6_RPSTAT1;
} else {
r = MEMSTAT_ILK;
}
if (i915_mmio_reg_valid(r))
freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r);
return intel_rps_get_cagf(rps, freq);
}
static u32 read_cagf(struct intel_rps *rps)
{
return __read_cagf(rps, true);
}
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
intel_wakeref_t wakeref;
u32 freq = 0;
with_intel_runtime_pm_if_in_use(rpm, wakeref)
freq = intel_gpu_freq(rps, read_cagf(rps));
return freq;
}
u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
{
return intel_gpu_freq(rps, __read_cagf(rps, false));
}
static u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
intel_wakeref_t wakeref;
u32 freq = 0;
with_intel_runtime_pm_if_in_use(rpm, wakeref)
freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);
return freq;
}
static u32 intel_rps_get_req(u32 pureq)
{
u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;
return req;
}
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
{
u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));
return intel_gpu_freq(rps, freq);
}
u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
{
if (rps_uses_slpc(rps))
return intel_rps_read_punit_req_frequency(rps);
else
return intel_gpu_freq(rps, rps->cur_freq);
}
u32 intel_rps_get_max_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
if (rps_uses_slpc(rps))
return slpc->max_freq_softlimit;
else
return intel_gpu_freq(rps, rps->max_freq_softlimit);
}
/**
* intel_rps_get_max_raw_freq - returns the max frequency in raw HW units.
* @rps: the intel_rps structure
*
* Returns the max frequency in raw hardware units; on newer platforms
* (Gen9+ and the SLPC path) the raw unit is 50 MHz.
*/
u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
u32 freq;
if (rps_uses_slpc(rps)) {
return DIV_ROUND_CLOSEST(slpc->rp0_freq,
GT_FREQUENCY_MULTIPLIER);
} else {
freq = rps->max_freq;
if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
/* Convert GT frequency to 50 MHz units */
freq /= GEN9_FREQ_SCALER;
}
return freq;
}
}
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
if (rps_uses_slpc(rps))
return slpc->rp0_freq;
else
return intel_gpu_freq(rps, rps->rp0_freq);
}
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
if (rps_uses_slpc(rps))
return slpc->rp1_freq;
else
return intel_gpu_freq(rps, rps->rp1_freq);
}
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
if (rps_uses_slpc(rps))
return slpc->min_freq;
else
return intel_gpu_freq(rps, rps->min_freq);
}
static void rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
struct intel_gt *gt = rps_to_gt(rps);
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_rps_freq_caps caps;
u32 rp_state_limits;
u32 gt_perf_status;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpcurupei, rpcurup, rpprevup;
u32 rpcurdownei, rpcurdown, rpprevdown;
u32 rpupei, rpupt, rpdownei, rpdownt;
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
gen6_rps_get_freq_caps(rps, &caps);
if (IS_GEN9_LP(i915))
gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
else
gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
if (GRAPHICS_VER(i915) >= 9) {
reqf >>= 23;
} else {
reqf &= ~GEN6_TURBO_DISABLE;
if (IS_HASWELL(i915) || IS_BROADWELL(i915))
reqf >>= 24;
else
reqf >>= 25;
}
reqf = intel_gpu_freq(rps, reqf);
rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
rpstat = intel_rps_read_rpstat(rps);
rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);
cagf = intel_rps_read_actual_frequency(rps);
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
if (GRAPHICS_VER(i915) >= 11) {
pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
/*
* The equivalent to the PM ISR & IIR cannot be read
* without affecting the current state of the system
*/
pm_isr = 0;
pm_iir = 0;
} else if (GRAPHICS_VER(i915) >= 8) {
pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
} else {
pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
}
pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
drm_printf(p, "Video Turbo Mode: %s\n",
str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
drm_printf(p, "HW control enabled: %s\n",
str_yes_no(rpmodectl & GEN6_RP_ENABLE));
drm_printf(p, "SW control enabled: %s\n",
str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
if (GRAPHICS_VER(i915) <= 10)
drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
pm_isr, pm_iir);
drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
drm_printf(p, "Render p-state ratio: %d\n",
(gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
drm_printf(p, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
drm_printf(p, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
drm_printf(p, "CAGF: %dMHz\n", cagf);
drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
rpcurupei,
intel_gt_pm_interval_to_ns(gt, rpcurupei));
drm_printf(p, "RP CUR UP: %d (%lldns)\n",
rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
drm_printf(p, "RP PREV UP: %d (%lldns)\n",
rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
drm_printf(p, "Up threshold: %d%%\n",
rps->power.up_threshold);
drm_printf(p, "RP UP EI: %d (%lldns)\n",
rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
rpcurdownei,
intel_gt_pm_interval_to_ns(gt, rpcurdownei));
drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(gt, rpcurdown));
drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(gt, rpprevdown));
drm_printf(p, "Down threshold: %d%%\n",
rps->power.down_threshold);
drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(rps, caps.min_freq));
drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(rps, caps.rp1_freq));
drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(rps, caps.rp0_freq));
drm_printf(p, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(rps, rps->max_freq));
drm_printf(p, "Current freq: %d MHz\n",
intel_gpu_freq(rps, rps->cur_freq));
drm_printf(p, "Actual freq: %d MHz\n", cagf);
drm_printf(p, "Idle freq: %d MHz\n",
intel_gpu_freq(rps, rps->idle_freq));
drm_printf(p, "Min freq: %d MHz\n",
intel_gpu_freq(rps, rps->min_freq));
drm_printf(p, "Boost freq: %d MHz\n",
intel_gpu_freq(rps, rps->boost_freq));
drm_printf(p, "Max freq: %d MHz\n",
intel_gpu_freq(rps, rps->max_freq));
drm_printf(p,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
}
static void slpc_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
struct intel_gt *gt = rps_to_gt(rps);
struct intel_uncore *uncore = gt->uncore;
struct intel_rps_freq_caps caps;
u32 pm_mask;
gen6_rps_get_freq_caps(rps, &caps);
pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
drm_printf(p, "PM MASK=0x%08x\n", pm_mask);
drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
drm_printf(p, "RPSTAT1: 0x%08x\n", intel_rps_read_rpstat(rps));
drm_printf(p, "RPNSWREQ: %dMHz\n", intel_rps_get_requested_frequency(rps));
drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(rps, caps.min_freq));
drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(rps, caps.rp1_freq));
drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(rps, caps.rp0_freq));
drm_printf(p, "Current freq: %d MHz\n",
intel_rps_get_requested_frequency(rps));
drm_printf(p, "Actual freq: %d MHz\n",
intel_rps_read_actual_frequency(rps));
drm_printf(p, "Min freq: %d MHz\n",
intel_rps_get_min_frequency(rps));
drm_printf(p, "Boost freq: %d MHz\n",
intel_rps_get_boost_frequency(rps));
drm_printf(p, "Max freq: %d MHz\n",
intel_rps_get_max_frequency(rps));
drm_printf(p,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, caps.rp1_freq));
}
void gen6_rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
if (rps_uses_slpc(rps))
return slpc_frequency_dump(rps, p);
else
return rps_frequency_dump(rps, p);
}
static int set_max_freq(struct intel_rps *rps, u32 val)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
int ret = 0;
mutex_lock(&rps->lock);
val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
ret = -EINVAL;
goto unlock;
}
if (val > rps->rp0_freq)
drm_dbg(&i915->drm, "User requested overclocking to %d\n",
intel_gpu_freq(rps, val));
rps->max_freq_softlimit = val;
val = clamp_t(int, rps->cur_freq,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
/*
* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged.
*/
intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
return ret;
}
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
if (rps_uses_slpc(rps))
return intel_guc_slpc_set_max_freq(slpc, val);
else
return set_max_freq(rps, val);
}
u32 intel_rps_get_min_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
if (rps_uses_slpc(rps))
return slpc->min_freq_softlimit;
else
return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
/**
* intel_rps_get_min_raw_freq - returns the min frequency in raw HW units.
* @rps: the intel_rps structure
*
* Returns the min frequency in raw hardware units; on newer platforms
* (Gen9+ and the SLPC path) the raw unit is 50 MHz.
*/
u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
u32 freq;
if (rps_uses_slpc(rps)) {
return DIV_ROUND_CLOSEST(slpc->min_freq,
GT_FREQUENCY_MULTIPLIER);
} else {
freq = rps->min_freq;
if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
/* Convert GT frequency to 50 MHz units */
freq /= GEN9_FREQ_SCALER;
}
return freq;
}
}
static int set_min_freq(struct intel_rps *rps, u32 val)
{
int ret = 0;
mutex_lock(&rps->lock);
val = intel_freq_opcode(rps, val);
if (val < rps->min_freq ||
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
ret = -EINVAL;
goto unlock;
}
rps->min_freq_softlimit = val;
val = clamp_t(int, rps->cur_freq,
rps->min_freq_softlimit,
rps->max_freq_softlimit);
/*
* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged.
*/
intel_rps_set(rps, val);
unlock:
mutex_unlock(&rps->lock);
return ret;
}
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
if (rps_uses_slpc(rps))
return intel_guc_slpc_set_min_freq(slpc, val);
else
return set_min_freq(rps, val);
}
u8 intel_rps_get_up_threshold(struct intel_rps *rps)
{
return rps->power.up_threshold;
}
static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val)
{
int ret;
if (val > 100)
return -EINVAL;
ret = mutex_lock_interruptible(&rps->lock);
if (ret)
return ret;
if (*threshold == val)
goto out_unlock;
*threshold = val;
/* Force reset. */
rps->last_freq = -1;
mutex_lock(&rps->power.mutex);
rps->power.mode = -1;
mutex_unlock(&rps->power.mutex);
intel_rps_set(rps, clamp(rps->cur_freq,
rps->min_freq_softlimit,
rps->max_freq_softlimit));
out_unlock:
mutex_unlock(&rps->lock);
return ret;
}
int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold)
{
return rps_set_threshold(rps, &rps->power.up_threshold, threshold);
}
u8 intel_rps_get_down_threshold(struct intel_rps *rps)
{
return rps->power.down_threshold;
}
int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold)
{
return rps_set_threshold(rps, &rps->power.down_threshold, threshold);
}
static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;
/* Allow punit to process software requests */
intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
}
void intel_rps_raise_unslice(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
mutex_lock(&rps->lock);
if (rps_uses_slpc(rps)) {
/* RP limits have not been initialized yet for SLPC path */
struct intel_rps_freq_caps caps;
gen6_rps_get_freq_caps(rps, &caps);
intel_rps_set_manual(rps, true);
intel_uncore_write(uncore, GEN6_RPNSWREQ,
((caps.rp0_freq <<
GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
GEN9_IGNORE_SLICE_RATIO));
intel_rps_set_manual(rps, false);
} else {
intel_rps_set(rps, rps->rp0_freq);
}
mutex_unlock(&rps->lock);
}
void intel_rps_lower_unslice(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
mutex_lock(&rps->lock);
if (rps_uses_slpc(rps)) {
/* RP limits have not been initialized yet for SLPC path */
struct intel_rps_freq_caps caps;
gen6_rps_get_freq_caps(rps, &caps);
intel_rps_set_manual(rps, true);
intel_uncore_write(uncore, GEN6_RPNSWREQ,
((caps.min_freq <<
GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
GEN9_IGNORE_SLICE_RATIO));
intel_rps_set_manual(rps, false);
} else {
intel_rps_set(rps, rps->min_freq);
}
mutex_unlock(&rps->lock);
}
static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
{
struct intel_gt *gt = rps_to_gt(rps);
intel_wakeref_t wakeref;
u32 val;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
val = intel_uncore_read(gt->uncore, reg32);
return val;
}
bool rps_read_mask_mmio(struct intel_rps *rps,
i915_reg_t reg32, u32 mask)
{
return rps_read_mmio(rps, reg32) & mask;
}
/* External interface for intel_ips.ko */
static struct drm_i915_private __rcu *ips_mchdev;
/*
* Tells the intel_ips driver that the i915 driver is now loaded, if
* IPS got loaded first.
*
* This awkward dance is so that neither module has to depend on the
* other in order for IPS to do the appropriate communication of
* GPU turbo limits to i915.
*/
static void
ips_ping_for_i915_load(void)
{
void (*link)(void);
link = symbol_get(ips_link_to_i915_driver);
if (link) {
link();
symbol_put(ips_link_to_i915_driver);
}
}
void intel_rps_driver_register(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
/*
* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values.
*/
if (GRAPHICS_VER(gt->i915) == 5) {
GEM_BUG_ON(ips_mchdev);
rcu_assign_pointer(ips_mchdev, gt->i915);
ips_ping_for_i915_load();
}
}
void intel_rps_driver_unregister(struct intel_rps *rps)
{
if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
rcu_assign_pointer(ips_mchdev, NULL);
}
static struct drm_i915_private *mchdev_get(void)
{
struct drm_i915_private *i915;
rcu_read_lock();
i915 = rcu_dereference(ips_mchdev);
if (i915 && !kref_get_unless_zero(&i915->drm.ref))
i915 = NULL;
rcu_read_unlock();
return i915;
}
/**
* i915_read_mch_val - return value for IPS use
*
* Calculate and return a value for the IPS driver to use when deciding whether
* we have thermal and power headroom to increase CPU or GPU power budget.
*/
unsigned long i915_read_mch_val(void)
{
struct drm_i915_private *i915;
unsigned long chipset_val = 0;
unsigned long graphics_val = 0;
intel_wakeref_t wakeref;
i915 = mchdev_get();
if (!i915)
return 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
struct intel_ips *ips = &to_gt(i915)->rps.ips;
spin_lock_irq(&mchdev_lock);
chipset_val = __ips_chipset_val(ips);
graphics_val = __ips_gfx_val(ips);
spin_unlock_irq(&mchdev_lock);
}
drm_dev_put(&i915->drm);
return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
* i915_gpu_raise - raise GPU frequency limit
*
* Raise the limit; IPS indicates we have thermal headroom.
*/
bool i915_gpu_raise(void)
{
struct drm_i915_private *i915;
struct intel_rps *rps;
i915 = mchdev_get();
if (!i915)
return false;
rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
if (rps->max_freq_softlimit < rps->max_freq)
rps->max_freq_softlimit++;
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
* i915_gpu_lower - lower GPU frequency limit
*
* IPS indicates we're close to a thermal limit, so throttle back the GPU
* frequency maximum.
*/
bool i915_gpu_lower(void)
{
struct drm_i915_private *i915;
struct intel_rps *rps;
i915 = mchdev_get();
if (!i915)
return false;
rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
if (rps->max_freq_softlimit > rps->min_freq)
rps->max_freq_softlimit--;
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
* i915_gpu_busy - indicate GPU busyness to IPS
*
* Tell the IPS driver whether or not the GPU is busy.
*/
bool i915_gpu_busy(void)
{
struct drm_i915_private *i915;
bool ret;
i915 = mchdev_get();
if (!i915)
return false;
ret = to_gt(i915)->awake;
drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
* i915_gpu_turbo_disable - disable graphics turbo
*
* Disable graphics turbo by resetting the max frequency and setting the
* current frequency to the default.
*/
bool i915_gpu_turbo_disable(void)
{
struct drm_i915_private *i915;
struct intel_rps *rps;
bool ret;
i915 = mchdev_get();
if (!i915)
return false;
rps = &to_gt(i915)->rps;
spin_lock_irq(&mchdev_lock);
rps->max_freq_softlimit = rps->min_freq;
ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
spin_unlock_irq(&mchdev_lock);
drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#include "selftest_slpc.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_rps.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#ifdef CONFIG_64BIT
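/* Release all standard PCI BARs so that the LMEM BAR can be resized. */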
static void _release_bars(struct pci_dev *pdev)
{
int resno;
for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) {
if (pci_resource_len(pdev, resno))
pci_release_resource(pdev, resno);
}
}
static void
_resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int bar_size = pci_rebar_bytes_to_size(size);
int ret;
_release_bars(pdev);
ret = pci_resize_resource(pdev, resno, bar_size);
if (ret) {
drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n",
resno, 1 << bar_size, ERR_PTR(ret));
return;
}
drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
}
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct pci_bus *root = pdev->bus;
struct resource *root_res;
resource_size_t rebar_size;
resource_size_t current_size;
intel_wakeref_t wakeref;
u32 pci_cmd;
int i;
current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
if (i915->params.lmem_bar_size) {
u32 bar_sizes;
rebar_size = i915->params.lmem_bar_size *
(resource_size_t)SZ_1M;
bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
if (rebar_size == current_size)
return;
if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
rebar_size >= roundup_pow_of_two(lmem_size)) {
rebar_size = lmem_size;
drm_info(&i915->drm,
"Given bar size is not within supported size, setting it to default: %llu\n",
(u64)lmem_size >> 20);
}
} else {
rebar_size = current_size;
if (rebar_size != roundup_pow_of_two(lmem_size))
rebar_size = lmem_size;
else
return;
}
/* Find out if root bus contains 64bit memory addressing */
while (root->parent)
root = root->parent;
pci_bus_for_each_resource(root, root_res, i) {
if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
root_res->start > 0x100000000ull)
break;
}
/* pci_resize_resource() will fail anyway */
if (!root_res) {
drm_info(&i915->drm, "Can't resize LMEM BAR - platform support is missing\n");
return;
}
/*
* Releasing forcewake during BAR resizing results in later forcewake
* ack timeouts, and the former can happen at any time - it is asynchronous.
* Grabbing all forcewakes prevents it.
*/
with_intel_runtime_pm(i915->uncore.rpm, wakeref) {
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/* First disable PCI memory decoding references */
pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
pci_write_config_dword(pdev, PCI_COMMAND,
pci_cmd & ~PCI_COMMAND_MEMORY);
_resize_bar(i915, GEN12_LMEM_BAR, rebar_size);
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
}
}
#else
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
#endif
static int
region_lmem_release(struct intel_memory_region *mem)
{
int ret;
ret = intel_region_ttm_fini(mem);
io_mapping_fini(&mem->iomap);
return ret;
}
static int
region_lmem_init(struct intel_memory_region *mem)
{
int ret;
if (!io_mapping_init_wc(&mem->iomap,
mem->io_start,
mem->io_size))
return -EIO;
ret = intel_region_ttm_init(mem);
if (ret)
goto out_no_buddy;
return 0;
out_no_buddy:
io_mapping_fini(&mem->iomap);
return ret;
}
static const struct intel_memory_region_ops intel_region_lmem_ops = {
.init = region_lmem_init,
.release = region_lmem_release,
.init_object = __i915_gem_ttm_object_init,
};
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
u64 *start, u32 *size)
{
if (!IS_DG1(uncore->i915))
return false;
*start = 0;
*size = SZ_1M;
drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
*start, *start + *size);
return true;
}
static int reserve_lowmem_region(struct intel_uncore *uncore,
struct intel_memory_region *mem)
{
u64 reserve_start;
u32 reserve_size;
int ret;
if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
return 0;
ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
if (ret)
drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
return ret;
}
static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
resource_size_t min_page_size;
resource_size_t io_start;
resource_size_t io_size;
resource_size_t lmem_size;
int err;
if (!IS_DGFX(i915))
return ERR_PTR(-ENODEV);
if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
return ERR_PTR(-ENXIO);
if (HAS_FLAT_CCS(i915)) {
resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF;
lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT;
lmem_size *= SZ_1G;
flat_ccs_base = intel_gt_mcr_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
flat_ccs_base = (flat_ccs_base >> XEHP_CCS_BASE_SHIFT) * SZ_64K;
if (GEM_WARN_ON(lmem_size < flat_ccs_base))
return ERR_PTR(-EIO);
tile_stolen = lmem_size - flat_ccs_base;
/* If the FLAT_CCS_BASE_ADDR register is not populated, flag an error */
if (tile_stolen == lmem_size)
drm_err(&i915->drm,
"CCS_BASE_ADDR register did not have expected value\n");
lmem_size -= tile_stolen;
} else {
/* Stolen starts from GSMBASE without CCS */
lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
}
i915_resize_lmem_bar(i915, lmem_size);
if (i915->params.lmem_size > 0) {
lmem_size = min_t(resource_size_t, lmem_size,
mul_u32_u32(i915->params.lmem_size, SZ_1M));
}
io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size);
if (!io_size)
return ERR_PTR(-EIO);
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
I915_GTT_PAGE_SIZE_4K;
mem = intel_memory_region_create(i915,
0,
lmem_size,
min_page_size,
io_start,
io_size,
INTEL_MEMORY_LOCAL,
0,
&intel_region_lmem_ops);
if (IS_ERR(mem))
return mem;
err = reserve_lowmem_region(uncore, mem);
if (err)
goto err_region_put;
drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
&mem->io_start);
drm_info(&i915->drm, "Local memory IO size: %pa\n",
&mem->io_size);
drm_info(&i915->drm, "Local memory available: %pa\n",
&lmem_size);
if (io_size < lmem_size)
drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n",
(u64)io_size >> 20);
return mem;
err_region_put:
intel_memory_region_destroy(mem);
return ERR_PTR(err);
}
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
return setup_lmem(gt);
}
| linux-master | drivers/gpu/drm/i915/gt/intel_region_lmem.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016 Intel Corporation
*/
#include "gem/i915_gem_context.h"
#include "gt/intel_ring.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "mock_engine.h"
#include "selftests/mock_request.h"
static int mock_timeline_pin(struct intel_timeline *tl)
{
int err;
if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj, NULL)))
return -EBUSY;
err = intel_timeline_pin_map(tl);
i915_gem_object_unlock(tl->hwsp_ggtt->obj);
if (err)
return err;
atomic_inc(&tl->pin_count);
return 0;
}
static void mock_timeline_unpin(struct intel_timeline *tl)
{
GEM_BUG_ON(!atomic_read(&tl->pin_count));
atomic_dec(&tl->pin_count);
}
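/* Allocate an internal object and look up its GGTT vma to back the mock ring. */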
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_private *i915 = vm->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
goto err;
return vma;
err:
i915_gem_object_put(obj);
return vma;
}
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
const unsigned long sz = PAGE_SIZE;
struct intel_ring *ring;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
kref_init(&ring->ref);
ring->size = sz;
ring->effective_size = sz;
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE);
if (IS_ERR(ring->vma)) {
kfree(ring);
return NULL;
}
intel_ring_update_space(ring);
return ring;
}
static void mock_ring_free(struct intel_ring *ring)
{
i915_vma_put(ring->vma);
kfree(ring);
}
static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
struct i915_request,
mock.link);
}
static void advance(struct i915_request *request)
{
list_del_init(&request->mock.link);
i915_request_mark_complete(request);
GEM_BUG_ON(!i915_request_completed(request));
intel_engine_signal_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->hw_lock, flags);
/* Timer fired, first request is complete */
request = first_request(engine);
if (request)
advance(request);
/*
* Also immediately signal any subsequent 0-delay requests, but
* requeue the timer for the next delayed request.
*/
while ((request = first_request(engine))) {
if (request->mock.delay) {
mod_timer(&engine->hw_delay,
jiffies + request->mock.delay);
break;
}
advance(request);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
static void mock_context_unpin(struct intel_context *ce)
{
}
static void mock_context_post_unpin(struct intel_context *ce)
{
i915_vma_unpin(ce->ring->vma);
}
static void mock_context_destroy(struct kref *ref)
{
struct intel_context *ce = container_of(ref, typeof(*ce), ref);
GEM_BUG_ON(intel_context_is_pinned(ce));
if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
mock_ring_free(ce->ring);
mock_timeline_unpin(ce->timeline);
}
intel_context_fini(ce);
intel_context_free(ce);
}
static int mock_context_alloc(struct intel_context *ce)
{
int err;
ce->ring = mock_ring(ce->engine);
if (!ce->ring)
return -ENOMEM;
ce->timeline = intel_timeline_create(ce->engine->gt);
if (IS_ERR(ce->timeline)) {
/* Unwind the ring we just allocated; freeing the engine here would be bogus */
mock_ring_free(ce->ring);
return PTR_ERR(ce->timeline);
}
err = mock_timeline_pin(ce->timeline);
if (err) {
intel_timeline_put(ce->timeline);
ce->timeline = NULL;
return err;
}
return 0;
}
static int mock_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww, void **unused)
{
return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
}
static int mock_context_pin(struct intel_context *ce, void *unused)
{
return 0;
}
static void mock_context_reset(struct intel_context *ce)
{
}
static const struct intel_context_ops mock_context_ops = {
.alloc = mock_context_alloc,
.pre_pin = mock_context_pre_pin,
.pin = mock_context_pin,
.unpin = mock_context_unpin,
.post_unpin = mock_context_post_unpin,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
.reset = mock_context_reset,
.destroy = mock_context_destroy,
};
static int mock_request_alloc(struct i915_request *request)
{
INIT_LIST_HEAD(&request->mock.link);
request->mock.delay = 0;
return 0;
}
static int mock_emit_flush(struct i915_request *request,
unsigned int flags)
{
return 0;
}
static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
return cs;
}
static void mock_submit_request(struct i915_request *request)
{
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
unsigned long flags;
i915_request_submit(request);
spin_lock_irqsave(&engine->hw_lock, flags);
list_add_tail(&request->mock.link, &engine->hw_queue);
if (list_is_first(&request->mock.link, &engine->hw_queue)) {
if (request->mock.delay)
mod_timer(&engine->hw_delay,
jiffies + request->mock.delay);
else
advance(request);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
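/* Track the request on the sched_engine's request list; the caller holds the sched_engine lock. */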
static void mock_add_to_engine(struct i915_request *rq)
{
lockdep_assert_held(&rq->engine->sched_engine->lock);
list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}
static void mock_remove_from_engine(struct i915_request *rq)
{
struct intel_engine_cs *engine, *locked;
/*
* Virtual engines complicate acquiring the engine timeline lock,
* as their rq->engine pointer is not stable until under that
* engine lock. The simple ploy we use is to take the lock then
* check that the rq still belongs to the newly locked engine.
*/
locked = READ_ONCE(rq->engine);
spin_lock_irq(&locked->sched_engine->lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->sched_engine->lock);
spin_lock(&engine->sched_engine->lock);
locked = engine;
}
list_del_init(&rq->sched.link);
spin_unlock_irq(&locked->sched_engine->lock);
}
static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}
static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
GEM_BUG_ON(stalled);
}
static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
struct i915_request *rq;
unsigned long flags;
del_timer_sync(&mock->hw_delay);
spin_lock_irqsave(&engine->sched_engine->lock, flags);
/* Mark all submitted requests as skipped. */
list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
if (i915_request_mark_eio(rq)) {
__i915_request_submit(rq);
i915_request_put(rq);
}
}
INIT_LIST_HEAD(&mock->hw_queue);
spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
static void mock_reset_finish(struct intel_engine_cs *engine)
{
}
static void mock_engine_release(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
GEM_BUG_ON(timer_pending(&mock->hw_delay));
i915_sched_engine_put(engine->sched_engine);
intel_breadcrumbs_put(engine->breadcrumbs);
intel_context_unpin(engine->kernel_context);
intel_context_put(engine->kernel_context);
intel_engine_fini_retire(engine);
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
{
struct mock_engine *engine;
GEM_BUG_ON(id >= I915_NUM_ENGINES);
GEM_BUG_ON(!to_gt(i915)->uncore);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
return NULL;
/* minimal engine setup for requests */
engine->base.i915 = i915;
engine->base.gt = to_gt(i915);
engine->base.uncore = to_gt(i915)->uncore;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
engine->base.legacy_idx = INVALID_ENGINE;
engine->base.instance = id;
engine->base.status_page.addr = (void *)(engine + 1);
engine->base.cops = &mock_context_ops;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
engine->base.add_active_request = mock_add_to_engine;
engine->base.remove_active_request = mock_remove_from_engine;
engine->base.reset.prepare = mock_reset_prepare;
engine->base.reset.rewind = mock_reset_rewind;
engine->base.reset.cancel = mock_reset_cancel;
engine->base.reset.finish = mock_reset_finish;
engine->base.release = mock_engine_release;
to_gt(i915)->engine[id] = &engine->base;
to_gt(i915)->engine_class[0][id] = &engine->base;
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
intel_engine_add_user(&engine->base);
return &engine->base;
}
int mock_engine_init(struct intel_engine_cs *engine)
{
struct intel_context *ce;
INIT_LIST_HEAD(&engine->pinned_contexts_list);
engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
if (!engine->sched_engine)
return -ENOMEM;
engine->sched_engine->private_data = engine;
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
engine->breadcrumbs = intel_breadcrumbs_create(NULL);
if (!engine->breadcrumbs)
goto err_schedule;
ce = create_kernel_context(engine);
if (IS_ERR(ce))
goto err_breadcrumbs;
/* We insist the kernel context is using the status_page */
engine->status_page.vma = ce->timeline->hwsp_ggtt;
engine->kernel_context = ce;
return 0;
err_breadcrumbs:
intel_breadcrumbs_put(engine->breadcrumbs);
err_schedule:
i915_sched_engine_put(engine->sched_engine);
return -ENOMEM;
}
void mock_engine_flush(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
struct i915_request *request, *rn;
del_timer_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
advance(request);
spin_unlock_irq(&mock->hw_lock);
}
void mock_engine_reset(struct intel_engine_cs *engine)
{
}
| linux-master | drivers/gpu/drm/i915/gt/mock_engine.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_gt_regs.h"
static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
if (IS_METEORLAKE(i915) && engine->id == GSC0) {
intel_uncore_write(engine->gt->uncore,
RC_PSMI_CTRL_GSCCS,
_MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
/* hysteresis 0xA=5us as recommended in spec */
intel_uncore_write(engine->gt->uncore,
PWRCTX_MAXCNT_GSCCS,
0xA);
}
}
static void dbg_poison_ce(struct intel_context *ce)
{
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
if (ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
void *map;
if (!i915_gem_object_trylock(obj, NULL))
return;
map = i915_gem_object_pin_map(obj, type);
if (!IS_ERR(map)) {
memset(map, CONTEXT_REDZONE, obj->base.size);
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}
i915_gem_object_unlock(obj);
}
}
static int __engine_unpark(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
struct intel_context *ce;
ENGINE_TRACE(engine, "\n");
intel_gt_pm_get(engine->gt);
/* Discard stale context state from across idling */
ce = engine->kernel_context;
if (ce) {
GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
/* Flush all pending HW writes before we touch the context */
while (unlikely(intel_context_inflight(ce)))
intel_engine_flush_submission(engine);
/* First poison the image to verify we never fully trust it */
dbg_poison_ce(ce);
/* Scrub the context image after our loss of control */
ce->ops->reset(ce);
CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
ce->timeline->seqno,
READ_ONCE(*ce->timeline->hwsp_seqno),
ce->ring->emit);
GEM_BUG_ON(ce->timeline->seqno !=
READ_ONCE(*ce->timeline->hwsp_seqno));
}
if (engine->unpark)
engine->unpark(engine);
intel_breadcrumbs_unpark(engine->breadcrumbs);
intel_engine_unpark_heartbeat(engine);
return 0;
}
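/* Fence callback: fold the request's emit-to-complete time into the engine's latency estimate (EWMA). */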
static void duration(struct dma_fence *fence, struct dma_fence_cb *cb)
{
struct i915_request *rq = to_request(fence);
ewma__engine_latency_add(&rq->engine->latency,
ktime_us_delta(rq->fence.timestamp,
rq->duration.emitted));
}
static void
__queue_and_release_pm(struct i915_request *rq,
struct intel_timeline *tl,
struct intel_engine_cs *engine)
{
struct intel_gt_timelines *timelines = &engine->gt->timelines;
ENGINE_TRACE(engine, "parking\n");
/*
* Open code one half of intel_context_enter, which we have to omit
* here (see the large comment below). The other half must not be
* called because the request is constructed directly with
* __i915_request_create(), which already increments the active count
* via intel_context_mark_active().
*/
GEM_BUG_ON(rq->context->active_count != 1);
__intel_gt_pm_get(engine->gt);
/*
* We have to serialise all potential retirement paths with our
* submission, as we don't want to underflow either the
* engine->wakeref.counter or our timeline->active_count.
*
* Equally, we cannot allow a new submission to start until
* after we finish queueing, nor could we allow that submitter
* to retire us before we are ready!
*/
spin_lock(&timelines->lock);
/* Let intel_gt_retire_requests() retire us (acquired under lock) */
if (!atomic_fetch_inc(&tl->active_count))
list_add_tail(&tl->link, &timelines->active_list);
/* Hand the request over to HW and so engine_retire() */
__i915_request_queue_bh(rq);
/* Let new submissions commence (and maybe retire this timeline) */
__intel_wakeref_defer_park(&engine->wakeref);
spin_unlock(&timelines->lock);
}
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
bool result = true;
/*
* This is execlist specific behaviour intended to ensure the GPU is
* idle by switching to a known 'safe' context. With GuC submission, the
* same idle guarantee is achieved by other means (disabling
* scheduling). Further, switching to a 'safe' context has no effect
* with GuC submission as the scheduler can just switch back again.
*
* FIXME: Move this backend scheduler specific behaviour into the
* scheduler backend.
*/
if (intel_engine_uses_guc(engine))
return true;
/* GPU is pointing to the void, as good as in the kernel context. */
if (intel_gt_is_wedged(engine->gt))
return true;
GEM_BUG_ON(!intel_context_is_barrier(ce));
GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
/* Already inside the kernel context, safe to power down. */
if (engine->wakeref_serial == engine->serial)
return true;
/*
* Note, we do this without taking the timeline->mutex. We cannot
* as we may be called while retiring the kernel context and so
* already underneath the timeline->mutex. Instead we rely on the
* exclusive property of the __engine_park that prevents anyone
* else from creating a request on this engine. This also requires
* that the ring is empty and we avoid any waits while constructing
* the context, as they assume protection by the timeline->mutex.
* This should hold true as we can only park the engine after
* retiring the last request, thus all rings should be empty and
* all timelines idle.
*
* For unlocking, there are 2 other parties and the GPU who have a
* stake here.
*
* A new gpu user will be waiting on the engine-pm to start their
* engine_unpark. New waiters are predicated on engine->wakeref.count
* and so intel_wakeref_defer_park() acts like a mutex_unlock of the
* engine->wakeref.
*
* The other party is intel_gt_retire_requests(), which is walking the
* list of active timelines looking for completions. Meanwhile as soon
* as we call __i915_request_queue(), the GPU may complete our request.
* Ergo, if we put ourselves on the timelines.active_list
* (see intel_timeline_enter()) before we increment the
* engine->wakeref.count, we may see the request completion and retire
* it causing an underflow of the engine->wakeref.
*/
set_bit(CONTEXT_IS_PARKING, &ce->flags);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
rq = __i915_request_create(ce, GFP_NOWAIT);
if (IS_ERR(rq))
/* Context switch failed, hope for the best! Maybe reset? */
goto out_unlock;
/* Check again on the next retirement. */
engine->wakeref_serial = engine->serial + 1;
i915_request_add_active_barriers(rq);
/* Install ourselves as a preemption barrier */
rq->sched.attr.priority = I915_PRIORITY_BARRIER;
if (likely(!__i915_request_commit(rq))) { /* engine should be idle! */
/*
* Use an interrupt for precise measurement of duration,
* otherwise we rely on someone else retiring all the requests
* which may delay the signaling (i.e. we will likely wait
* until the background request retirement, which runs every
* second or two).
*/
BUILD_BUG_ON(sizeof(rq->duration) > sizeof(rq->submitq));
dma_fence_add_callback(&rq->fence, &rq->duration.cb, duration);
rq->duration.emitted = ktime_get();
}
/* Expose ourselves to the world */
__queue_and_release_pm(rq, ce->timeline, engine);
result = false;
out_unlock:
clear_bit(CONTEXT_IS_PARKING, &ce->flags);
return result;
}
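/* Flush the engine's idle barrier tasks, invoking each callback with -EAGAIN now that the engine is parked. */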
static void call_idle_barriers(struct intel_engine_cs *engine)
{
struct llist_node *node, *next;
llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
struct dma_fence_cb *cb =
container_of((struct list_head *)node,
typeof(*cb), node);
cb->func(ERR_PTR(-EAGAIN), cb);
}
}
static int __engine_park(struct intel_wakeref *wf)
{
struct intel_engine_cs *engine =
container_of(wf, typeof(*engine), wakeref);
engine->saturated = 0;
/*
* If one and only one request is completed between pm events,
* we know that we are inside the kernel context and it is
* safe to power down. (We are paranoid in case that runtime
* suspend causes corruption to the active context image, and
* want to avoid that impacting userspace.)
*/
if (!switch_to_kernel_context(engine))
return -EBUSY;
ENGINE_TRACE(engine, "parked\n");
call_idle_barriers(engine); /* cleanup after wedging */
intel_engine_park_heartbeat(engine);
intel_breadcrumbs_park(engine->breadcrumbs);
/* Must be reset upon idling, or we may miss the busy wakeup. */
GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
if (engine->park)
engine->park(engine);
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
intel_gt_pm_put_async(engine->gt);
return 0;
}
static const struct intel_wakeref_ops wf_ops = {
.get = __engine_unpark,
.put = __engine_park,
};
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
intel_engine_init_heartbeat(engine);
intel_gsc_idle_msg_enable(engine);
}
/**
* intel_engine_reset_pinned_contexts - Reset the pinned contexts of
* an engine.
* @engine: The engine whose pinned contexts we want to reset.
*
* Typically the pinned context LMEM images lose their content or get it
* corrupted on suspend. This function resets their images.
*/
void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
{
struct intel_context *ce;
list_for_each_entry(ce, &engine->pinned_contexts_list,
pinned_contexts_link) {
/* kernel context gets reset at __engine_unpark() */
if (ce == engine->kernel_context)
continue;
dbg_poison_ce(ce);
ce->ops->reset(ce);
}
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_engine_pm.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2015 Intel Corporation
*/
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_mocs.h"
#include "intel_ring.h"
/* structures required */
struct drm_i915_mocs_entry {
u32 control_value;
u16 l3cc_value;
u16 used;
};
struct drm_i915_mocs_table {
unsigned int size;
unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
u8 uc_index;
u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
u8 unused_entries_index;
};
/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
#define _LE_CACHEABILITY(value) ((value) << 0)
#define _LE_TGT_CACHE(value) ((value) << 2)
#define LE_LRUM(value) ((value) << 4)
#define LE_AOM(value) ((value) << 6)
#define LE_RSC(value) ((value) << 7)
#define LE_SCC(value) ((value) << 8)
#define LE_PFM(value) ((value) << 11)
#define LE_SCF(value) ((value) << 14)
#define LE_COS(value) ((value) << 15)
#define LE_SSE(value) ((value) << 17)
/* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */
#define _L4_CACHEABILITY(value) ((value) << 2)
#define IG_PAT(value) ((value) << 8)
/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
#define L3_ESC(value) ((value) << 0)
#define L3_SCC(value) ((value) << 1)
#define _L3_CACHEABILITY(value) ((value) << 4)
#define L3_GLBGO(value) ((value) << 6)
#define L3_LKUP(value) ((value) << 7)
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* indices 62-63 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES 3
#define MTL_NUM_MOCS_ENTRIES 16
/* (e)LLC caching options */
/*
* Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
* the same as LE_UC
*/
#define LE_0_PAGETABLE _LE_CACHEABILITY(0)
#define LE_1_UC _LE_CACHEABILITY(1)
#define LE_2_WT _LE_CACHEABILITY(2)
#define LE_3_WB _LE_CACHEABILITY(3)
/* Target cache */
#define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0)
#define LE_TC_1_LLC _LE_TGT_CACHE(1)
#define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2)
#define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3)
/* L3 caching options */
#define L3_0_DIRECT _L3_CACHEABILITY(0)
#define L3_1_UC _L3_CACHEABILITY(1)
#define L3_2_RESERVED _L3_CACHEABILITY(2)
#define L3_3_WB _L3_CACHEABILITY(3)
/* L4 caching options */
#define L4_0_WB _L4_CACHEABILITY(0)
#define L4_1_WT _L4_CACHEABILITY(1)
#define L4_2_RESERVED _L4_CACHEABILITY(2)
#define L4_3_UC _L4_CACHEABILITY(3)
#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
[__idx] = { \
.control_value = __control_value, \
.l3cc_value = __l3cc_value, \
.used = 1, \
}
/*
* MOCS tables
*
* These are the MOCS tables that are programmed across all the rings.
* The control value is programmed to all the rings that support the
* MOCS registers. While the l3cc_values are only programmed to the
* LNCFCMOCS0 - LNCFCMOCS32 registers.
*
* These tables are intended to be kept reasonably consistent across
* HW platforms and, for ICL+, to be identical across OSes. To achieve
* that, for Icelake and above, the list of entries is published as part
* of bspec.
*
* Entries not part of the following tables are undefined as far as
* userspace is concerned and shouldn't be relied upon. For Gen < 12
* they will be initialized to PTE. Gen >= 12 doesn't have a setting for
* PTE, so those platforms (except TGL/RKL) will be initialized to L3 WB to
* catch accidental use of reserved and unused MOCS indices.
*
* The last few entries are reserved by the hardware. For ICL+ they
* should be initialized according to bspec and never used, for older
* platforms they should never be written to.
*
* NOTE1: These tables are part of bspec and defined as part of hardware
* interface for ICL+. For older platforms, they are part of kernel
* ABI. It is expected that, for a specific hardware platform, existing
* entries will remain constant and the table will only be updated by
* adding new entries, filling unused positions.
*
* NOTE2: For GEN >= 12 except TGL and RKL, reserved and unspecified MOCS
* indices have been set to L3 WB. These reserved entries should never
* be used; they may be changed to lower-performance variants with better
* coherency in the future if more entries are needed.
* For TGL/RKL, all the unspecified MOCS indices are mapped to L3 UC.
*/
#define GEN9_MOCS_ENTRIES \
MOCS_ENTRY(I915_MOCS_UNCACHED, \
LE_1_UC | LE_TC_2_LLC_ELLC, \
L3_1_UC), \
MOCS_ENTRY(I915_MOCS_PTE, \
LE_0_PAGETABLE | LE_TC_0_PAGETABLE | LE_LRUM(3), \
L3_3_WB)
static const struct drm_i915_mocs_entry skl_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
L3_3_WB),
/*
* mocs:63
* - used by the L3 for all of its evictions.
* Thus it is expected to allow LLC cacheability to enable coherent
* flows to be maintained.
* - used to force L3 uncacheable cycles.
* Thus it is expected to make the surface L3 uncacheable.
*/
MOCS_ENTRY(63,
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
L3_1_UC)
};
/* NOTE: the LE_TGT_CACHE is not used on Broxton */
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
GEN9_MOCS_ENTRIES,
MOCS_ENTRY(I915_MOCS_CACHED,
LE_1_UC | LE_TC_2_LLC_ELLC | LE_LRUM(3),
L3_3_WB)
};
#define GEN11_MOCS_ENTRIES \
/* Entries 0 and 1 are defined per-platform */ \
/* Base - L3 + LLC */ \
MOCS_ENTRY(2, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_3_WB), \
/* Base - Uncached */ \
MOCS_ENTRY(3, \
LE_1_UC | LE_TC_1_LLC, \
L3_1_UC), \
/* Base - L3 */ \
MOCS_ENTRY(4, \
LE_1_UC | LE_TC_1_LLC, \
L3_3_WB), \
/* Base - LLC */ \
MOCS_ENTRY(5, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC), \
/* Age 0 - LLC */ \
MOCS_ENTRY(6, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
L3_1_UC), \
/* Age 0 - L3 + LLC */ \
MOCS_ENTRY(7, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
L3_3_WB), \
/* Age: Don't Chg. - LLC */ \
MOCS_ENTRY(8, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
L3_1_UC), \
/* Age: Don't Chg. - L3 + LLC */ \
MOCS_ENTRY(9, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
L3_3_WB), \
/* No AOM - LLC */ \
MOCS_ENTRY(10, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
L3_1_UC), \
/* No AOM - L3 + LLC */ \
MOCS_ENTRY(11, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
L3_3_WB), \
/* No AOM; Age 0 - LLC */ \
MOCS_ENTRY(12, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
L3_1_UC), \
/* No AOM; Age 0 - L3 + LLC */ \
MOCS_ENTRY(13, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
L3_3_WB), \
/* No AOM; Age:DC - LLC */ \
MOCS_ENTRY(14, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_1_UC), \
/* No AOM; Age:DC - L3 + LLC */ \
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
/* Bypass LLC - Uncached (EHL+) */ \
MOCS_ENTRY(16, \
LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
L3_1_UC), \
/* Bypass LLC - L3 (Read-Only) (EHL+) */ \
MOCS_ENTRY(17, \
LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
L3_3_WB), \
/* Skip Caching - L3 + LLC(12.5%) */ \
MOCS_ENTRY(19, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7), \
L3_3_WB), \
/* Skip Caching - L3 + LLC(25%) */ \
MOCS_ENTRY(20, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3), \
L3_3_WB), \
/* Skip Caching - L3 + LLC(50%) */ \
MOCS_ENTRY(21, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1), \
L3_3_WB), \
/* Skip Caching - L3 + LLC(75%) */ \
MOCS_ENTRY(22, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3), \
L3_3_WB), \
/* Skip Caching - L3 + LLC(87.5%) */ \
MOCS_ENTRY(23, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7), \
L3_3_WB), \
/* HW Reserved - SW program but never use */ \
MOCS_ENTRY(62, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC), \
/* HW Reserved - SW program but never use */ \
MOCS_ENTRY(63, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
L3_1_UC)
static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
/*
* NOTE:
* Reserved and unspecified MOCS indices default to the I915_MOCS_PTE(1)
* entry, i.e. L3 UC on these platforms. These reserved entries should
* never be used; they may be changed to lower-performance variants with
* better coherency in the future if more entries are needed. Only the
* I915_MOCS_PTE(1) entry is defined explicitly for that purpose;
* __init_mocs_table() takes care of programming the unused indices with
* this entry.
*/
MOCS_ENTRY(I915_MOCS_PTE,
LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
L3_1_UC),
GEN11_MOCS_ENTRIES,
/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
MOCS_ENTRY(48,
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
L3_3_WB),
/* Implicitly enable L1 - HDC:L1 + L3 */
MOCS_ENTRY(49,
LE_1_UC | LE_TC_1_LLC,
L3_3_WB),
/* Implicitly enable L1 - HDC:L1 + LLC */
MOCS_ENTRY(50,
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
L3_1_UC),
/* Implicitly enable L1 - HDC:L1 */
MOCS_ENTRY(51,
LE_1_UC | LE_TC_1_LLC,
L3_1_UC),
/* HW Special Case (CCS) */
MOCS_ENTRY(60,
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
L3_1_UC),
/* HW Special Case (Displayable) */
MOCS_ENTRY(61,
LE_1_UC | LE_TC_1_LLC,
L3_3_WB),
};
static const struct drm_i915_mocs_entry icl_mocs_table[] = {
/* Base - Uncached (Deprecated) */
MOCS_ENTRY(I915_MOCS_UNCACHED,
LE_1_UC | LE_TC_1_LLC,
L3_1_UC),
/* Base - L3 + LeCC:PAT (Deprecated) */
MOCS_ENTRY(I915_MOCS_PTE,
LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
L3_3_WB),
GEN11_MOCS_ENTRIES
};
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
/* WB - L3 */
MOCS_ENTRY(5, 0, L3_3_WB),
/* WB - L3 50% */
MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
/* WB - L3 25% */
MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
/* WB - L3 12.5% */
MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),
/* HDC:L1 + L3 */
MOCS_ENTRY(48, 0, L3_3_WB),
/* HDC:L1 */
MOCS_ENTRY(49, 0, L3_1_UC),
/* HW Reserved */
MOCS_ENTRY(60, 0, L3_1_UC),
MOCS_ENTRY(61, 0, L3_1_UC),
MOCS_ENTRY(62, 0, L3_1_UC),
MOCS_ENTRY(63, 0, L3_1_UC),
};
static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
GEN11_MOCS_ENTRIES,
/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
MOCS_ENTRY(48,
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
L3_3_WB),
/* Implicitly enable L1 - HDC:L1 + L3 */
MOCS_ENTRY(49,
LE_1_UC | LE_TC_1_LLC,
L3_3_WB),
/* Implicitly enable L1 - HDC:L1 + LLC */
MOCS_ENTRY(50,
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
L3_1_UC),
/* Implicitly enable L1 - HDC:L1 */
MOCS_ENTRY(51,
LE_1_UC | LE_TC_1_LLC,
L3_1_UC),
/* HW Special Case (CCS) */
MOCS_ENTRY(60,
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
L3_1_UC),
/* HW Special Case (Displayable) */
MOCS_ENTRY(61,
LE_1_UC | LE_TC_1_LLC,
L3_3_WB),
};
static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = {
/* wa_1608975824 */
MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)),
/* UC - Coherent; GO:L3 */
MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)),
/* UC - Coherent; GO:Memory */
MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
/* UC - Non-Coherent; GO:Memory */
MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)),
/* UC - Non-Coherent; GO:L3 */
MOCS_ENTRY(4, 0, L3_1_UC),
/* WB */
MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)),
/* HW Reserved - SW program but never use. */
MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)),
MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)),
MOCS_ENTRY(60, 0, L3_1_UC),
MOCS_ENTRY(61, 0, L3_1_UC),
MOCS_ENTRY(62, 0, L3_1_UC),
MOCS_ENTRY(63, 0, L3_1_UC),
};
static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
/* UC - Coherent; GO:L3 */
MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
/* UC - Coherent; GO:Memory */
MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
/* UC - Non-Coherent; GO:Memory */
MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
/* WB - LC */
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = {
/* Wa_14011441408: Set Go to Memory for MOCS#0 */
MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
/* UC - Coherent; GO:Memory */
MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
/* UC - Non-Coherent; GO:Memory */
MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
/* WB - LC */
MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
static const struct drm_i915_mocs_entry pvc_mocs_table[] = {
/* Error */
MOCS_ENTRY(0, 0, L3_3_WB),
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
/* WB */
MOCS_ENTRY(2, 0, L3_3_WB),
};
static const struct drm_i915_mocs_entry mtl_mocs_table[] = {
/* Error - Reserved for Non-Use */
MOCS_ENTRY(0,
IG_PAT(0),
L3_LKUP(1) | L3_3_WB),
/* Cached - L3 + L4 */
MOCS_ENTRY(1,
IG_PAT(1),
L3_LKUP(1) | L3_3_WB),
/* L4 - GO:L3 */
MOCS_ENTRY(2,
IG_PAT(1),
L3_LKUP(1) | L3_1_UC),
/* Uncached - GO:L3 */
MOCS_ENTRY(3,
IG_PAT(1) | L4_3_UC,
L3_LKUP(1) | L3_1_UC),
/* L4 - GO:Mem */
MOCS_ENTRY(4,
IG_PAT(1),
L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
/* Uncached - GO:Mem */
MOCS_ENTRY(5,
IG_PAT(1) | L4_3_UC,
L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
/* L4 - L3:NoLKUP; GO:L3 */
MOCS_ENTRY(6,
IG_PAT(1),
L3_1_UC),
/* Uncached - L3:NoLKUP; GO:L3 */
MOCS_ENTRY(7,
IG_PAT(1) | L4_3_UC,
L3_1_UC),
/* L4 - L3:NoLKUP; GO:Mem */
MOCS_ENTRY(8,
IG_PAT(1),
L3_GLBGO(1) | L3_1_UC),
/* Uncached - L3:NoLKUP; GO:Mem */
MOCS_ENTRY(9,
IG_PAT(1) | L4_3_UC,
L3_GLBGO(1) | L3_1_UC),
/* Display - L3; L4:WT */
MOCS_ENTRY(14,
IG_PAT(1) | L4_1_WT,
L3_LKUP(1) | L3_3_WB),
/* CCS - Non-Displayable */
MOCS_ENTRY(15,
IG_PAT(1),
L3_GLBGO(1) | L3_1_UC),
};
enum {
HAS_GLOBAL_MOCS = BIT(0),
HAS_ENGINE_MOCS = BIT(1),
HAS_RENDER_L3CC = BIT(2),
};
static bool has_l3cc(const struct drm_i915_private *i915)
{
return true;
}
static bool has_global_mocs(const struct drm_i915_private *i915)
{
return HAS_GLOBAL_MOCS_REGISTERS(i915);
}
static bool has_mocs(const struct drm_i915_private *i915)
{
return !IS_DGFX(i915);
}
static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
struct drm_i915_mocs_table *table)
{
unsigned int flags;
memset(table, 0, sizeof(struct drm_i915_mocs_table));
table->unused_entries_index = I915_MOCS_PTE;
if (IS_METEORLAKE(i915)) {
table->size = ARRAY_SIZE(mtl_mocs_table);
table->table = mtl_mocs_table;
table->n_entries = MTL_NUM_MOCS_ENTRIES;
table->uc_index = 9;
table->unused_entries_index = 1;
} else if (IS_PONTEVECCHIO(i915)) {
table->size = ARRAY_SIZE(pvc_mocs_table);
table->table = pvc_mocs_table;
table->n_entries = PVC_NUM_MOCS_ENTRIES;
table->uc_index = 1;
table->wb_index = 2;
table->unused_entries_index = 2;
} else if (IS_DG2(i915)) {
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
table->table = dg2_mocs_table_g10_ax;
} else {
table->size = ARRAY_SIZE(dg2_mocs_table);
table->table = dg2_mocs_table;
}
table->uc_index = 1;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->unused_entries_index = 3;
} else if (IS_XEHPSDV(i915)) {
table->size = ARRAY_SIZE(xehpsdv_mocs_table);
table->table = xehpsdv_mocs_table;
table->uc_index = 2;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->unused_entries_index = 5;
} else if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
table->uc_index = 1;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->unused_entries_index = 5;
} else if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
/* For TGL/RKL, can't be changed now for ABI reasons */
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->uc_index = 3;
} else if (GRAPHICS_VER(i915) >= 12) {
table->size = ARRAY_SIZE(gen12_mocs_table);
table->table = gen12_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->uc_index = 3;
table->unused_entries_index = 2;
} else if (GRAPHICS_VER(i915) == 11) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
} else if (IS_GEN9_BC(i915)) {
table->size = ARRAY_SIZE(skl_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = skl_mocs_table;
} else if (IS_GEN9_LP(i915)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
} else {
drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9,
"Platform that should have a MOCS table does not.\n");
return 0;
}
if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
return 0;
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (GRAPHICS_VER(i915) == 9) {
int i;
for (i = 0; i < table->size; i++)
if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
(L3_ESC(1) | L3_SCC(0x7))))
return 0;
}
flags = 0;
if (has_mocs(i915)) {
if (has_global_mocs(i915))
flags |= HAS_GLOBAL_MOCS;
else
flags |= HAS_ENGINE_MOCS;
}
if (has_l3cc(i915))
flags |= HAS_RENDER_L3CC;
return flags;
}
/*
* Get the control_value from a MOCS entry. If the requested index is not
* used, the value of the entry at unused_entries_index is returned instead
* (which defaults to I915_MOCS_PTE).
*/
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
unsigned int index)
{
if (index < table->size && table->table[index].used)
return table->table[index].control_value;
return table->table[table->unused_entries_index].control_value;
}
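/*
 * Iterate over all n_entries MOCS control values, substituting the
 * unused-entry value for any hole, e.g.:
 *
 *	for_each_mocs(mocs, table, i)
 *		intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
 */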
#define for_each_mocs(mocs, t, i) \
for (i = 0; \
i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\
i++)
static void __init_mocs_table(struct intel_uncore *uncore,
const struct drm_i915_mocs_table *table,
u32 addr)
{
unsigned int i;
u32 mocs;
drm_WARN_ONCE(&uncore->i915->drm, !table->unused_entries_index,
"Unused entries index should have been defined\n");
for_each_mocs(mocs, table, i)
intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
}
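/* Per-engine MOCS register base, used on platforms without a global MOCS table. */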
static u32 mocs_offset(const struct intel_engine_cs *engine)
{
static const u32 offset[] = {
[RCS0] = __GEN9_RCS0_MOCS0,
[VCS0] = __GEN9_VCS0_MOCS0,
[VCS1] = __GEN9_VCS1_MOCS0,
[VECS0] = __GEN9_VECS0_MOCS0,
[BCS0] = __GEN9_BCS0_MOCS0,
[VCS2] = __GEN11_VCS2_MOCS0,
};
GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset));
return offset[engine->id];
}
static void init_mocs_table(struct intel_engine_cs *engine,
const struct drm_i915_mocs_table *table)
{
__init_mocs_table(engine->uncore, table, mocs_offset(engine));
}
/*
* Get the l3cc_value from a MOCS entry. If the requested index is not
* used, the value of the entry at unused_entries_index is returned instead
* (which defaults to I915_MOCS_PTE).
*/
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
unsigned int index)
{
if (index < table->size && table->table[index].used)
return table->table[index].l3cc_value;
return table->table[table->unused_entries_index].l3cc_value;
}
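/* Two 16-bit l3cc entries are packed per 32-bit LNCFCMOCS register: 'low' in bits 15:0, 'high' in bits 31:16. */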
static u32 l3cc_combine(u16 low, u16 high)
{
return low | (u32)high << 16;
}
#define for_each_l3cc(l3cc, t, i) \
for (i = 0; \
i < ((t)->n_entries + 1) / 2 ? \
(l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \
get_entry_l3cc((t), 2 * i + 1))), 1 : \
0; \
i++)
static void init_l3cc_table(struct intel_gt *gt,
const struct drm_i915_mocs_table *table)
{
unsigned long flags;
unsigned int i;
u32 l3cc;
intel_gt_mcr_lock(gt, &flags);
for_each_l3cc(l3cc, table, i)
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
else
intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
intel_gt_mcr_unlock(gt, flags);
}
void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
struct drm_i915_mocs_table table;
unsigned int flags;
/* Called under a blanket forcewake */
assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
flags = get_mocs_settings(engine->i915, &table);
if (!flags)
return;
/* Platforms with global MOCS do not need per-engine initialization. */
if (flags & HAS_ENGINE_MOCS)
init_mocs_table(engine, &table);
if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
init_l3cc_table(engine->gt, &table);
}
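/* Platforms with a global MOCS table program a single set of registers at GEN12_GLOBAL_MOCS instead of per-engine copies. */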
static u32 global_mocs_offset(void)
{
return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}
void intel_set_mocs_index(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
get_mocs_settings(gt->i915, &table);
gt->mocs.uc_index = table.uc_index;
if (HAS_L3_CCS_READ(gt->i915))
gt->mocs.wb_index = table.wb_index;
}
void intel_mocs_init(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
unsigned int flags;
/*
* LLC and eDRAM control values are not applicable to dgfx
*/
flags = get_mocs_settings(gt->i915, &table);
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt->uncore, &table, global_mocs_offset());
/*
* Initialize the L3CC table as part of MOCS initialization to make
* sure the LNCFCMOCSx registers are programmed for subsequent
* memory transactions, including GuC transactions.
*/
if (flags & HAS_RENDER_L3CC)
init_l3cc_table(gt, &table);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_mocs.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/intel_mocs.c |
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2017-2018 Intel Corporation
*/
#include "../intel_timeline.h"
#include "mock_timeline.h"
void mock_timeline_init(struct intel_timeline *timeline, u64 context)
{
timeline->gt = NULL;
timeline->fence_context = context;
mutex_init(&timeline->mutex);
INIT_ACTIVE_FENCE(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
INIT_LIST_HEAD(&timeline->link);
}
void mock_timeline_fini(struct intel_timeline *timeline)
{
i915_syncmap_free(&timeline->sync);
}
| linux-master | drivers/gpu/drm/i915/gt/selftests/mock_timeline.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
#include "gt/uc/intel_guc_ads.h"
#include "gt/uc/intel_guc_ct.h"
#include "gt/uc/intel_guc_slpc.h"
#include "gt/uc/intel_guc_submission.h"
#include "intel_guc.h"
#include "intel_guc_debugfs.h"
#include "intel_guc_log_debugfs.h"
static int guc_info_show(struct seq_file *m, void *data)
{
struct intel_guc *guc = m->private;
struct drm_printer p = drm_seq_file_printer(m);
if (!intel_guc_is_supported(guc))
return -ENODEV;
intel_guc_load_status(guc, &p);
drm_puts(&p, "\n");
intel_guc_log_info(&guc->log, &p);
if (!intel_guc_submission_is_used(guc))
return 0;
intel_guc_ct_print_info(&guc->ct, &p);
intel_guc_submission_print_info(guc, &p);
intel_guc_ads_print_policy_info(guc, &p);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_info);
static int guc_registered_contexts_show(struct seq_file *m, void *data)
{
struct intel_guc *guc = m->private;
struct drm_printer p = drm_seq_file_printer(m);
if (!intel_guc_submission_is_used(guc))
return -ENODEV;
intel_guc_submission_print_context_info(guc, &p);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts);
static int guc_slpc_info_show(struct seq_file *m, void *unused)
{
struct intel_guc *guc = m->private;
struct intel_guc_slpc *slpc = &guc->slpc;
struct drm_printer p = drm_seq_file_printer(m);
if (!intel_guc_slpc_is_used(guc))
return -ENODEV;
return intel_guc_slpc_print_info(slpc, &p);
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
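/* debugfs 'eval' callback: only expose guc_slpc_info when SLPC is actually in use. */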
static bool intel_eval_slpc_support(void *data)
{
struct intel_guc *guc = (struct intel_guc *)data;
return intel_guc_slpc_is_used(guc);
}
static int guc_sched_disable_delay_ms_get(void *data, u64 *val)
{
struct intel_guc *guc = data;
if (!intel_guc_submission_is_used(guc))
return -ENODEV;
*val = (u64)guc->submission_state.sched_disable_delay_ms;
return 0;
}
static int guc_sched_disable_delay_ms_set(void *data, u64 val)
{
struct intel_guc *guc = data;
if (!intel_guc_submission_is_used(guc))
return -ENODEV;
/* clamp to a practical limit; 1 minute is a reasonable longest delay */
guc->submission_state.sched_disable_delay_ms = min_t(u64, val, 60000);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(guc_sched_disable_delay_ms_fops,
guc_sched_disable_delay_ms_get,
guc_sched_disable_delay_ms_set, "%lld\n");
static int guc_sched_disable_gucid_threshold_get(void *data, u64 *val)
{
struct intel_guc *guc = data;
if (!intel_guc_submission_is_used(guc))
return -ENODEV;
*val = guc->submission_state.sched_disable_gucid_threshold;
return 0;
}
static int guc_sched_disable_gucid_threshold_set(void *data, u64 val)
{
struct intel_guc *guc = data;
if (!intel_guc_submission_is_used(guc))
return -ENODEV;
if (val > intel_guc_sched_disable_gucid_threshold_max(guc))
guc->submission_state.sched_disable_gucid_threshold =
intel_guc_sched_disable_gucid_threshold_max(guc);
else
guc->submission_state.sched_disable_gucid_threshold = val;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(guc_sched_disable_gucid_threshold_fops,
guc_sched_disable_gucid_threshold_get,
guc_sched_disable_gucid_threshold_set, "%lld\n");
void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "guc_info", &guc_info_fops, NULL },
{ "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
{ "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
{ "guc_sched_disable_delay_ms", &guc_sched_disable_delay_ms_fops, NULL },
{ "guc_sched_disable_gucid_threshold", &guc_sched_disable_gucid_threshold_fops,
NULL },
};
if (!intel_guc_is_supported(guc))
return;
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), guc);
intel_guc_log_debugfs_register(&guc->log, root);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014 Intel Corporation
*/
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gt/gen8_engine_cs.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_clock_utils.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_lrc.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
/**
* DOC: GuC-based command submission
*
* The Scratch registers:
* There are 16 MMIO-based registers starting at 0xC180. The kernel driver writes
* a value to the action register (SOFT_SCRATCH_0) along with any data. It then
* triggers an interrupt on the GuC via another register write (0xC4C8).
* Firmware writes a success/fail code back to the action register after
* processing the request. The kernel driver polls waiting for this update and
* then proceeds.
*
* Command Transport buffers (CTBs):
* Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
* - G2H) are a message interface between the i915 and GuC.
*
* Context registration:
* Before a context can be submitted it must be registered with the GuC via a
* H2G. A unique guc_id is associated with each context. The context is either
* registered at request creation time (normal operation) or at submission time
* (abnormal operation, e.g. after a reset).
*
* Context submission:
* The i915 updates the LRC tail value in memory. The i915 must enable the
* scheduling of the context within the GuC for the GuC to actually consider it.
* Therefore, the first time a disabled context is submitted we use a schedule
* enable H2G, while follow up submissions are done via the context submit H2G,
* which informs the GuC that a previously enabled context has new work
* available.
*
* Context unpin:
* To unpin a context a H2G is used to disable scheduling. When the
* corresponding G2H returns indicating the scheduling disable operation has
* completed it is safe to unpin the context. While a disable is in flight it
* isn't safe to resubmit the context so a fence is used to stall all future
* requests of that context until the G2H is returned. Because this interaction
* with the GuC takes a non-zero amount of time we delay the disabling of
* scheduling after the pin count goes to zero by a configurable period of time
* (see SCHED_DISABLE_DELAY_MS). The thought is this gives the user a window of
* time to resubmit something on the context before doing this costly operation.
* This delay is only done if the context isn't closed and the guc_id usage is
* less than a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD).
*
* Context deregistration:
* Before a context can be destroyed or if we steal its guc_id we must
* deregister the context with the GuC via H2G. If stealing the guc_id it isn't
* safe to submit anything to this guc_id until the deregister completes so a
* fence is used to stall all requests associated with this guc_id until the
* corresponding G2H returns indicating the guc_id has been deregistered.
*
* submission_state.guc_ids:
* Unique number associated with private GuC context data passed in during
* context registration / submission / deregistration. 64k available. Simple ida
* is used for allocation.
*
* Stealing guc_ids:
* If no guc_ids are available they can be stolen from another context at
* request creation time if that context is unpinned. If a guc_id can't be found
* we punt this problem to the user as we believe this is near impossible to hit
* during normal use cases.
*
* Locking:
* In the GuC submission code we have 3 basic spin locks which protect
* everything. Details about each below.
*
* sched_engine->lock
* This is the submission lock for all contexts that share an i915 schedule
* engine (sched_engine), thus only one of the contexts which share a
* sched_engine can be submitting at a time. Currently only one sched_engine is
* used for all of GuC submission but that could change in the future.
*
* guc->submission_state.lock
* Global lock for GuC submission state. Protects guc_ids and destroyed contexts
* list.
*
* ce->guc_state.lock
* Protects everything under ce->guc_state. Ensures that a context is in the
* correct state before issuing a H2G. e.g. We don't issue a schedule disable
* on a disabled context (bad idea), we don't issue a schedule enable when a
* schedule disable is in flight, etc... Also protects list of inflight requests
* on the context and the priority management state. Lock is individual to each
* context.
*
* Lock ordering rules:
* sched_engine->lock -> ce->guc_state.lock
* guc->submission_state.lock -> ce->guc_state.lock
*
* Reset races:
* When a full GT reset is triggered it is assumed that some G2H responses to
* H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
* fatal as we do certain operations upon receiving a G2H (e.g. destroy
* contexts, release guc_ids, etc...). When this occurs we can scrub the
* context state and clean up appropriately; however, this is quite racy.
* To avoid races, the reset code must disable submission before scrubbing for
* the missing G2H, while the submission code must check for submission being
* disabled and skip sending H2Gs and updating context states when it is. Both
* sides must also make sure to hold the relevant locks.
*/
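/*
 * Illustrative sketch (not driver code): a minimal userspace model of the
 * schedule-disable-delay decision described above. The helper name and
 * parameters are hypothetical; in the driver the inputs come from the context
 * state, SCHED_DISABLE_DELAY_MS and NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD.
 */
#include <stdbool.h>

static bool example_should_delay_sched_disable(bool context_closed,
					       unsigned int guc_ids_in_use,
					       unsigned int threshold)
{
	/* A closed context will not be resubmitted, so disable immediately. */
	if (context_closed)
		return false;

	/* Under guc_id pressure, free the guc_id promptly instead of delaying. */
	return guc_ids_in_use < threshold;
}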
/* GuC Virtual Engine */
struct guc_virtual_engine {
struct intel_engine_cs base;
struct intel_context context;
};
static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned long flags);
static struct intel_context *
guc_create_parallel(struct intel_engine_cs **engines,
unsigned int num_siblings,
unsigned int width);
#define GUC_REQUEST_SIZE 64 /* bytes */
/*
* We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
* per the GuC submission interface. A different allocation algorithm is used
* (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
* partition the guc_id space. We believe the number of multi-lrc contexts in
* use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
* multi-lrc.
*/
#define NUMBER_MULTI_LRC_GUC_ID(guc) \
((guc)->submission_state.num_guc_ids / 16)
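/*
 * Illustrative sketch (not driver code): how the 1/16 multi-lrc partition
 * described above works out for a given total. The clamp to the stated
 * minimum of 32 is shown for illustration only; where the driver enforces
 * that minimum is not reproduced here.
 */
#include <stdio.h>

static unsigned int example_multi_lrc_guc_ids(unsigned int num_guc_ids)
{
	unsigned int n = num_guc_ids / 16;

	return n < 32 ? 32 : n;
}

static void example_print_guc_id_partition(unsigned int num_guc_ids)
{
	unsigned int multi = example_multi_lrc_guc_ids(num_guc_ids);

	/* e.g. 65536 total guc_ids -> 4096 multi-lrc, 61440 single-lrc */
	printf("total=%u multi-lrc=%u single-lrc=%u\n",
	       num_guc_ids, multi, num_guc_ids - multi);
}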
/*
* Below is a set of functions which control the GuC scheduling state which
* require a lock.
*/
#define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
#define SCHED_STATE_DESTROYED BIT(1)
#define SCHED_STATE_PENDING_DISABLE BIT(2)
#define SCHED_STATE_BANNED BIT(3)
#define SCHED_STATE_ENABLED BIT(4)
#define SCHED_STATE_PENDING_ENABLE BIT(5)
#define SCHED_STATE_REGISTERED BIT(6)
#define SCHED_STATE_POLICY_REQUIRED BIT(7)
#define SCHED_STATE_CLOSED BIT(8)
#define SCHED_STATE_BLOCKED_SHIFT 9
#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
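/*
 * Illustrative sketch (not driver code): the BLOCKED field above is a counter
 * embedded in the flags word rather than a single bit, so it is adjusted by
 * adding or subtracting BIT(SCHED_STATE_BLOCKED_SHIFT) and read back with a
 * shift-and-mask. These standalone helpers mirror that pattern on a plain
 * unsigned value using the same shift and mask as above.
 */
static unsigned int example_blocked_count(unsigned int sched_state)
{
	return (sched_state & (0xfffu << 9)) >> 9;
}

static unsigned int example_incr_blocked(unsigned int sched_state)
{
	/* Adding BIT(9) bumps the counter without touching the low flag bits. */
	return sched_state + (1u << 9);
}

static unsigned int example_decr_blocked(unsigned int sched_state)
{
	return sched_state - (1u << 9);
}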
static inline void init_sched_state(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
}
/*
* Kernel contexts can have SCHED_STATE_REGISTERED after suspend.
* A context close can race with the submission path, so SCHED_STATE_CLOSED
* can be set immediately before we try to register.
*/
#define SCHED_STATE_VALID_INIT \
(SCHED_STATE_BLOCKED_MASK | \
SCHED_STATE_CLOSED | \
SCHED_STATE_REGISTERED)
__maybe_unused
static bool sched_state_is_init(struct intel_context *ce)
{
return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT);
}
static inline bool
context_wait_for_deregister_to_register(struct intel_context *ce)
{
return ce->guc_state.sched_state &
SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
}
static inline void
set_context_wait_for_deregister_to_register(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |=
SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
}
static inline void
clr_context_wait_for_deregister_to_register(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &=
~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
}
static inline bool
context_destroyed(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
}
static inline void
set_context_destroyed(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
}
static inline bool context_pending_disable(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
}
static inline void set_context_pending_disable(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
}
static inline void clr_context_pending_disable(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
}
static inline bool context_banned(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_BANNED;
}
static inline void set_context_banned(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_BANNED;
}
static inline void clr_context_banned(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
}
static inline bool context_enabled(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
}
static inline void set_context_enabled(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
}
static inline void clr_context_enabled(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
}
static inline bool context_pending_enable(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
}
static inline void set_context_pending_enable(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
}
static inline void clr_context_pending_enable(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
}
static inline bool context_registered(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
}
static inline void set_context_registered(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
}
static inline void clr_context_registered(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
}
static inline bool context_policy_required(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
}
static inline void set_context_policy_required(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
}
static inline void clr_context_policy_required(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
}
static inline bool context_close_done(struct intel_context *ce)
{
return ce->guc_state.sched_state & SCHED_STATE_CLOSED;
}
static inline void set_context_close_done(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |= SCHED_STATE_CLOSED;
}
static inline u32 context_blocked(struct intel_context *ce)
{
return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
SCHED_STATE_BLOCKED_SHIFT;
}
static inline void incr_context_blocked(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
}
static inline void decr_context_blocked(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
}
static struct intel_context *
request_to_scheduling_context(struct i915_request *rq)
{
return intel_context_to_parent(rq->context);
}
static inline bool context_guc_id_invalid(struct intel_context *ce)
{
return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
}
static inline void set_context_guc_id_invalid(struct intel_context *ce)
{
ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
}
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
{
return &ce->engine->gt->uc.guc;
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
}
/*
* When using multi-lrc submission a scratch memory area is reserved in the
* parent's context state for the process descriptor, work queue, and handshake
* between the parent + children contexts to insert safe preemption points
* between each of the BBs. Currently the scratch area is sized to a page.
*
* The layout of this scratch area is below:
* 0 guc_process_desc
* + sizeof(struct guc_process_desc) child go
* + CACHELINE_BYTES child join[0]
* ...
* + CACHELINE_BYTES child join[n - 1]
* ... unused
* PARENT_SCRATCH_SIZE / 2 work queue start
* ... work queue
* PARENT_SCRATCH_SIZE - 1 work queue end
*/
#define WQ_SIZE (PARENT_SCRATCH_SIZE / 2)
#define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
struct sync_semaphore {
u32 semaphore;
u8 unused[CACHELINE_BYTES - sizeof(u32)];
};
struct parent_scratch {
union guc_descs {
struct guc_sched_wq_desc wq_desc;
struct guc_process_desc_v69 pdesc;
} descs;
struct sync_semaphore go;
struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
u32 wq[WQ_SIZE / sizeof(u32)];
};
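/*
 * Illustrative sketch (not driver code): a simplified model of the scratch
 * layout above, assuming a 4096 byte scratch page, 64 byte cachelines and a
 * cacheline-sized stand-in for the descriptors (all assumptions for this
 * sketch, not values taken from the driver). The checks mirror the idea
 * behind the BUILD_BUG_ONs above: descriptors, the "go" semaphore and the
 * "join" semaphores live in the first half of the page, the work queue fills
 * the second half.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_SCRATCH_SIZE	4096u
#define EXAMPLE_CACHELINE	64u
#define EXAMPLE_WQ_SIZE		(EXAMPLE_SCRATCH_SIZE / 2)
#define EXAMPLE_WQ_OFFSET	(EXAMPLE_SCRATCH_SIZE - EXAMPLE_WQ_SIZE)
#define EXAMPLE_MAX_CHILDREN	8u	/* hypothetical child count */

struct example_sync_semaphore {
	uint32_t semaphore;
	uint8_t unused[EXAMPLE_CACHELINE - sizeof(uint32_t)];
};

struct example_parent_scratch {
	uint8_t descs[EXAMPLE_CACHELINE];	/* stand-in for the descriptors */
	struct example_sync_semaphore go;
	struct example_sync_semaphore join[EXAMPLE_MAX_CHILDREN];
	uint8_t unused[EXAMPLE_WQ_OFFSET - EXAMPLE_CACHELINE -
		       sizeof(struct example_sync_semaphore) * (EXAMPLE_MAX_CHILDREN + 1)];
	uint32_t wq[EXAMPLE_WQ_SIZE / sizeof(uint32_t)];
};

/* Same invariants the driver asserts with BUILD_BUG_ON. */
static_assert(offsetof(struct example_parent_scratch, wq) == EXAMPLE_WQ_OFFSET,
	      "work queue must start at the second half of the scratch page");
static_assert(sizeof(struct example_parent_scratch) == EXAMPLE_SCRATCH_SIZE,
	      "scratch layout must fill exactly one page");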
static u32 __get_parent_scratch_offset(struct intel_context *ce)
{
GEM_BUG_ON(!ce->parallel.guc.parent_page);
return ce->parallel.guc.parent_page * PAGE_SIZE;
}
static u32 __get_wq_offset(struct intel_context *ce)
{
BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
return __get_parent_scratch_offset(ce) + WQ_OFFSET;
}
static struct parent_scratch *
__get_parent_scratch(struct intel_context *ce)
{
BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
/*
* Need to subtract LRC_STATE_OFFSET here as the
* parallel.guc.parent_page is the offset into ce->state while
* ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
*/
return (struct parent_scratch *)
(ce->lrc_reg_state +
((__get_parent_scratch_offset(ce) -
LRC_STATE_OFFSET) / sizeof(u32)));
}
static struct guc_process_desc_v69 *
__get_process_desc_v69(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
return &ps->descs.pdesc;
}
static struct guc_sched_wq_desc *
__get_wq_desc_v70(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
return &ps->descs.wq_desc;
}
static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
{
/*
* Check for space in the work queue. We cache a value of the head pointer in
* the intel_context structure in order to reduce the number of accesses to
* shared GPU memory, which may be across a PCIe bus.
*/
#define AVAILABLE_SPACE \
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
if (wqi_size > AVAILABLE_SPACE) {
ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
if (wqi_size > AVAILABLE_SPACE)
return NULL;
}
#undef AVAILABLE_SPACE
return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
}
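/*
 * Illustrative sketch (not driver code): free-space computation for a
 * power-of-two circular buffer like the work queue above, mirroring the
 * semantics of CIRC_SPACE() from <linux/circ_buf.h>: one slot is always left
 * unused so a full ring can be told apart from an empty one. The helper names
 * and parameters are stand-ins for the fields cached in intel_context.
 */
static unsigned int example_circ_space(unsigned int producer /* wqi_tail */,
				       unsigned int consumer /* wqi_head */,
				       unsigned int size /* power of two */)
{
	return (consumer - producer - 1) & (size - 1);
}

/*
 * Usage mirroring get_wq_pointer(): only re-read the shared consumer index
 * (an expensive access, possibly across PCIe) when the cached value says the
 * ring is too full for this item.
 */
static int example_wq_has_space(unsigned int *cached_head,
				unsigned int shared_head,
				unsigned int tail,
				unsigned int size,
				unsigned int wqi_size)
{
	if (wqi_size > example_circ_space(tail, *cached_head, size)) {
		*cached_head = shared_head;	/* refresh from shared memory */
		if (wqi_size > example_circ_space(tail, *cached_head, size))
			return 0;
	}
	return 1;
}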
static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
{
struct intel_context *ce = xa_load(&guc->context_lookup, id);
GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID);
return ce;
}
static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
{
struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
if (!base)
return NULL;
GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
return &base[index];
}
static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
{
u32 size;
int ret;
size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
GUC_MAX_CONTEXT_ID);
ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
(void **)&guc->lrc_desc_pool_vaddr_v69);
if (ret)
return ret;
return 0;
}
static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
{
if (!guc->lrc_desc_pool_vaddr_v69)
return;
guc->lrc_desc_pool_vaddr_v69 = NULL;
i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
}
static inline bool guc_submission_initialized(struct intel_guc *guc)
{
return guc->submission_initialized;
}
static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
{
struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
if (desc)
memset(desc, 0, sizeof(*desc));
}
static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
return __get_context(guc, id);
}
static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
struct intel_context *ce)
{
unsigned long flags;
/*
* xarray API doesn't have xa_save_irqsave wrapper, so calling the
* lower level functions directly.
*/
xa_lock_irqsave(&guc->context_lookup, flags);
__xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
{
unsigned long flags;
if (unlikely(!guc_submission_initialized(guc)))
return;
_reset_lrc_desc_v69(guc, id);
/*
* xarray API doesn't have xa_erase_irqsave wrapper, so calling
* the lower level functions directly.
*/
xa_lock_irqsave(&guc->context_lookup, flags);
__xa_erase(&guc->context_lookup, id);
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
static void decr_outstanding_submission_g2h(struct intel_guc *guc)
{
if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
wake_up_all(&guc->ct.wq);
}
static int guc_submission_send_busy_loop(struct intel_guc *guc,
const u32 *action,
u32 len,
u32 g2h_len_dw,
bool loop)
{
/*
* We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
* so we don't handle the case where we don't get a reply because we
* aborted the send due to the channel being busy.
*/
GEM_BUG_ON(g2h_len_dw && !loop);
if (g2h_len_dw)
atomic_inc(&guc->outstanding_submission_g2h);
return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
}
int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
atomic_t *wait_var,
bool interruptible,
long timeout)
{
const int state = interruptible ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
might_sleep();
GEM_BUG_ON(timeout < 0);
if (!atomic_read(wait_var))
return 0;
if (!timeout)
return -ETIME;
for (;;) {
prepare_to_wait(&guc->ct.wq, &wait, state);
if (!atomic_read(wait_var))
break;
if (signal_pending_state(state, current)) {
timeout = -EINTR;
break;
}
if (!timeout) {
timeout = -ETIME;
break;
}
timeout = io_schedule_timeout(timeout);
}
finish_wait(&guc->ct.wq, &wait);
return (timeout < 0) ? timeout : 0;
}
int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
{
if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
return 0;
return intel_guc_wait_for_pending_msg(guc,
&guc->outstanding_submission_g2h,
true, timeout);
}
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
static int try_context_registration(struct intel_context *ce, bool loop);
static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
int err = 0;
struct intel_context *ce = request_to_scheduling_context(rq);
u32 action[3];
int len = 0;
u32 g2h_len_dw = 0;
bool enabled;
lockdep_assert_held(&rq->engine->sched_engine->lock);
/*
* Corner case where requests were sitting in the priority list or a
* request resubmitted after the context was banned.
*/
if (unlikely(!intel_context_is_schedulable(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine);
return 0;
}
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
if (context_policy_required(ce)) {
err = guc_context_policy_init_v70(ce, false);
if (err)
return err;
}
spin_lock(&ce->guc_state.lock);
/*
* The request / context will be run on the hardware when scheduling
* gets enabled in the unblock. For multi-lrc we still submit the
* context to move the LRC tails.
*/
if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
goto out;
enabled = context_enabled(ce) || context_blocked(ce);
if (!enabled) {
action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
action[len++] = ce->guc_id.id;
action[len++] = GUC_CONTEXT_ENABLE;
set_context_pending_enable(ce);
intel_context_get(ce);
g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
} else {
action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
action[len++] = ce->guc_id.id;
}
err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
if (!enabled && !err) {
trace_intel_context_sched_enable(ce);
atomic_inc(&guc->outstanding_submission_g2h);
set_context_enabled(ce);
/*
* Without multi-lrc KMD does the submission step (moving the
* lrc tail) so enabling scheduling is sufficient to submit the
* context. This isn't the case in multi-lrc submission as the
* GuC needs to move the tails, hence the need for another H2G
* to submit a multi-lrc context after enabling scheduling.
*/
if (intel_context_is_parent(ce)) {
action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
err = intel_guc_send_nb(guc, action, len - 1, 0);
}
} else if (!enabled) {
clr_context_pending_enable(ce);
intel_context_put(ce);
}
if (likely(!err))
trace_i915_request_guc_submit(rq);
out:
spin_unlock(&ce->guc_state.lock);
return err;
}
static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
int ret = __guc_add_request(guc, rq);
if (unlikely(ret == -EBUSY)) {
guc->stalled_request = rq;
guc->submission_stall_reason = STALL_ADD_REQUEST;
}
return ret;
}
static inline void guc_set_lrc_tail(struct i915_request *rq)
{
rq->context->lrc_reg_state[CTX_RING_TAIL] =
intel_ring_set_tail(rq->ring, rq->tail);
}
static inline int rq_prio(const struct i915_request *rq)
{
return rq->sched.attr.priority;
}
static bool is_multi_lrc_rq(struct i915_request *rq)
{
return intel_context_is_parallel(rq->context);
}
static bool can_merge_rq(struct i915_request *rq,
struct i915_request *last)
{
return request_to_scheduling_context(rq) ==
request_to_scheduling_context(last);
}
static u32 wq_space_until_wrap(struct intel_context *ce)
{
return (WQ_SIZE - ce->parallel.guc.wqi_tail);
}
static void write_wqi(struct intel_context *ce, u32 wqi_size)
{
BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
/*
* Ensure WQI are visible before updating tail
*/
intel_guc_write_barrier(ce_to_guc(ce));
ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
(WQ_SIZE - 1);
WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
}
static int guc_wq_noop_append(struct intel_context *ce)
{
u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
if (!wqi)
return -EBUSY;
GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
*wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
FIELD_PREP(WQ_LEN_MASK, len_dw);
ce->parallel.guc.wqi_tail = 0;
return 0;
}
static int __guc_wq_item_append(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_context *child;
unsigned int wqi_size = (ce->parallel.number_children + 4) *
sizeof(u32);
u32 *wqi;
u32 len_dw = (wqi_size / sizeof(u32)) - 1;
int ret;
/* Ensure the context is in the correct state before updating the work queue */
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
/* Insert NOOP if this work queue item will wrap the tail pointer. */
if (wqi_size > wq_space_until_wrap(ce)) {
ret = guc_wq_noop_append(ce);
if (ret)
return ret;
}
wqi = get_wq_pointer(ce, wqi_size);
if (!wqi)
return -EBUSY;
GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
*wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
FIELD_PREP(WQ_LEN_MASK, len_dw);
*wqi++ = ce->lrc.lrca;
*wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
*wqi++ = 0; /* fence_id */
for_each_child(ce, child)
*wqi++ = child->ring->tail / sizeof(u64);
write_wqi(ce, wqi_size);
return 0;
}
static int guc_wq_item_append(struct intel_guc *guc,
struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
int ret;
if (unlikely(!intel_context_is_schedulable(ce)))
return 0;
ret = __guc_wq_item_append(rq);
if (unlikely(ret == -EBUSY)) {
guc->stalled_request = rq;
guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
}
return ret;
}
static bool multi_lrc_submit(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
intel_ring_set_tail(rq->ring, rq->tail);
/*
* We expect the front end (execbuf IOCTL) to set this flag on the last
* request generated from a multi-BB submission. This indicates to the
* backend (GuC interface) that we should submit this context thus
* submitting all the requests generated in parallel.
*/
return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
!intel_context_is_schedulable(ce);
}
static int guc_dequeue_one_context(struct intel_guc *guc)
{
struct i915_sched_engine * const sched_engine = guc->sched_engine;
struct i915_request *last = NULL;
bool submit = false;
struct rb_node *rb;
int ret;
lockdep_assert_held(&sched_engine->lock);
if (guc->stalled_request) {
submit = true;
last = guc->stalled_request;
switch (guc->submission_stall_reason) {
case STALL_REGISTER_CONTEXT:
goto register_context;
case STALL_MOVE_LRC_TAIL:
goto move_lrc_tail;
case STALL_ADD_REQUEST:
goto add_request;
default:
MISSING_CASE(guc->submission_stall_reason);
}
}
while ((rb = rb_first_cached(&sched_engine->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
priolist_for_each_request_consume(rq, rn, p) {
if (last && !can_merge_rq(rq, last))
goto register_context;
list_del_init(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, 0);
last = rq;
if (is_multi_lrc_rq(rq)) {
/*
* We need to coalesce all multi-lrc requests in
* a relationship into a single H2G. We are
* guaranteed that all of these requests will be
* submitted sequentially.
*/
if (multi_lrc_submit(rq)) {
submit = true;
goto register_context;
}
} else {
submit = true;
}
}
rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
register_context:
if (submit) {
struct intel_context *ce = request_to_scheduling_context(last);
if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
intel_context_is_schedulable(ce))) {
ret = try_context_registration(ce, false);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
} else if (ret == -EBUSY) {
guc->stalled_request = last;
guc->submission_stall_reason =
STALL_REGISTER_CONTEXT;
goto schedule_tasklet;
} else if (ret != 0) {
GEM_WARN_ON(ret); /* Unexpected */
goto deadlk;
}
}
move_lrc_tail:
if (is_multi_lrc_rq(last)) {
ret = guc_wq_item_append(guc, last);
if (ret == -EBUSY) {
goto schedule_tasklet;
} else if (ret != 0) {
GEM_WARN_ON(ret); /* Unexpected */
goto deadlk;
}
} else {
guc_set_lrc_tail(last);
}
add_request:
ret = guc_add_request(guc, last);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
} else if (ret == -EBUSY) {
goto schedule_tasklet;
} else if (ret != 0) {
GEM_WARN_ON(ret); /* Unexpected */
goto deadlk;
}
}
guc->stalled_request = NULL;
guc->submission_stall_reason = STALL_NONE;
return submit;
deadlk:
sched_engine->tasklet.callback = NULL;
tasklet_disable_nosync(&sched_engine->tasklet);
return false;
schedule_tasklet:
tasklet_schedule(&sched_engine->tasklet);
return false;
}
static void guc_submission_tasklet(struct tasklet_struct *t)
{
struct i915_sched_engine *sched_engine =
from_tasklet(sched_engine, t, tasklet);
unsigned long flags;
bool loop;
spin_lock_irqsave(&sched_engine->lock, flags);
do {
loop = guc_dequeue_one_context(sched_engine->private_data);
} while (loop);
i915_sched_engine_reset_on_empty(sched_engine);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
if (iir & GT_RENDER_USER_INTERRUPT)
intel_engine_signal_breadcrumbs(engine);
}
static void __guc_context_destroy(struct intel_context *ce);
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
static void guc_signal_context_fence(struct intel_context *ce);
static void guc_cancel_context_requests(struct intel_context *ce);
static void guc_blocked_fence_complete(struct intel_context *ce);
static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
{
struct intel_context *ce;
unsigned long index, flags;
bool pending_disable, pending_enable, deregister, destroyed, banned;
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
/*
* Corner case where the ref count on the object is zero but a
* deregister G2H was lost. In this case we don't touch the ref
* count and finish the destroy of the context.
*/
bool do_put = kref_get_unless_zero(&ce->ref);
xa_unlock(&guc->context_lookup);
if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
(cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) {
/* successful cancel so jump straight to close it */
intel_context_sched_disable_unpin(ce);
}
spin_lock(&ce->guc_state.lock);
/*
* Once we are at this point submission_disabled() is guaranteed
* to be visible to all callers who set the below flags (see above
* flush and flushes in reset_prepare). If submission_disabled()
* is set, the caller shouldn't set these flags.
*/
destroyed = context_destroyed(ce);
pending_enable = context_pending_enable(ce);
pending_disable = context_pending_disable(ce);
deregister = context_wait_for_deregister_to_register(ce);
banned = context_banned(ce);
init_sched_state(ce);
spin_unlock(&ce->guc_state.lock);
if (pending_enable || destroyed || deregister) {
decr_outstanding_submission_g2h(guc);
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
intel_gt_pm_put_async(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
if (pending_enable || deregister)
intel_context_put(ce);
}
/* Not mutually exclusive with the above if statement. */
if (pending_disable) {
guc_signal_context_fence(ce);
if (banned) {
guc_cancel_context_requests(ce);
intel_engine_signal_breadcrumbs(ce->engine);
}
intel_context_sched_disable_unpin(ce);
decr_outstanding_submission_g2h(guc);
spin_lock(&ce->guc_state.lock);
guc_blocked_fence_complete(ce);
spin_unlock(&ce->guc_state.lock);
intel_context_put(ce);
}
if (do_put)
intel_context_put(ce);
xa_lock(&guc->context_lookup);
}
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
/*
* GuC stores busyness stats for each engine at context in/out boundaries. A
* context 'in' logs execution start time, 'out' adds in -> out delta to total.
* i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
* GuC.
*
* __i915_pmu_event_read samples engine busyness. When sampling, if context id
* is valid (!= ~0) and start is non-zero, the engine is considered to be
* active. For an active engine total busyness = total + (now - start), where
* 'now' is the time at which the busyness is sampled. For inactive engine,
* total busyness = total.
*
* All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
*
* The start and total values provided by GuC are 32 bits and wrap around in a
* few minutes. Since perf pmu provides busyness as 64 bit monotonically
* increasing ns values, there is a need for this implementation to account for
* overflows and extend the GuC provided values to 64 bits before returning
* busyness to the user. In order to do that, a worker runs periodically with a
* period of 1/8th the time it takes for the timestamp to wrap (i.e. once every
* ~27 seconds for a gt clock frequency of 19.2 MHz).
*/
#define WRAP_TIME_CLKS U32_MAX
#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3)
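/*
 * Illustrative sketch (not driver code): the poll period worked out for a
 * given gt clock. For 19.2 MHz, POLL_TIME_CLKS / 19200000 = 27 seconds, which
 * is the figure quoted above; the driver then adds one and converts the
 * result to jiffies when it programs the ping worker.
 */
static unsigned long example_ping_period_secs(unsigned long gt_clock_hz)
{
	unsigned long long poll_clks = 0xffffffffULL >> 3;	/* POLL_TIME_CLKS */

	/* 536870911 / 19200000 = 27 for a 19.2 MHz gt clock */
	return (unsigned long)(poll_clks / gt_clock_hz);
}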
static void
__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
{
u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp);
if (new_start == lower_32_bits(*prev_start))
return;
/*
* When gt is unparked, we update the gt timestamp and start the ping
* worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
* is unparked, all switched in contexts will have a start time that is
* within +/- POLL_TIME_CLKS of the most recent gt_stamp.
*
* If neither gt_stamp nor new_start has rolled over, then the
* gt_stamp_hi does not need to be adjusted, however if one of them has
* rolled over, we need to adjust gt_stamp_hi accordingly.
*
* The below conditions address the cases of new_start rollover and
* gt_stamp_last rollover respectively.
*/
if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
if (new_start > gt_stamp_last &&
(gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi)
gt_stamp_hi--;
*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}
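/*
 * Illustrative sketch (not driver code): extending a wrapping 32-bit sample to
 * 64 bits against a nearby 64-bit reference, as done above. It relies on the
 * same assumption: the sample is within +/- POLL_TIME_CLKS of the reference,
 * so at most one of the two values has rolled over. Unsigned subtraction does
 * the modular distance check.
 */
#include <stdint.h>

static uint64_t example_extend_u32(uint64_t reference, uint32_t sample)
{
	uint32_t ref_lo = (uint32_t)reference;
	uint32_t ref_hi = (uint32_t)(reference >> 32);
	const uint32_t window = 0xffffffffu >> 3;	/* POLL_TIME_CLKS */

	/* Sample rolled over past the reference: bump the upper half. */
	if (sample < ref_lo && (uint32_t)(sample - ref_lo) <= window)
		ref_hi++;

	/* Reference rolled over but the sample had not: back off the upper half. */
	if (sample > ref_lo && (uint32_t)(ref_lo - sample) <= window && ref_hi)
		ref_hi--;

	return ((uint64_t)ref_hi << 32) | sample;
}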
#define record_read(map_, field_) \
iosys_map_rd_field(map_, 0, struct guc_engine_usage_record, field_)
/*
* GuC updates shared memory and KMD reads it. Since this is not synchronized,
* we run into a race where the value read is inconsistent. Sometimes the
* inconsistency is in reading the upper MSB bytes of the last_in value when
* this race occurs. Two types of cases are seen: either the upper 8 bits or the
* upper 24 bits read back as zero. Since the truncated results are still
* non-zero values, it is non-trivial to determine their validity. Instead we
* read the values multiple times until they are consistent. In test runs, 3
* attempts result in consistent values. The upper bound is set to 6 attempts
* and may need to be tuned as new occurrences are seen.
*/
static void __get_engine_usage_record(struct intel_engine_cs *engine,
u32 *last_in, u32 *id, u32 *total)
{
struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
int i = 0;
do {
*last_in = record_read(&rec_map, last_switch_in_stamp);
*id = record_read(&rec_map, current_context_index);
*total = record_read(&rec_map, total_runtime);
if (record_read(&rec_map, last_switch_in_stamp) == *last_in &&
record_read(&rec_map, current_context_index) == *id &&
record_read(&rec_map, total_runtime) == *total)
break;
} while (++i < 6);
}
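/*
 * Illustrative sketch (not driver code): the bounded read-until-consistent
 * pattern used above, on a generic record shared with another agent that can
 * update it at any time. The record type, field names and the volatile access
 * are stand-ins for the iosys_map reads in the driver.
 */
#include <stdint.h>

struct example_usage_record {
	uint32_t last_in;
	uint32_t id;
	uint32_t total;
};

static void example_read_consistent(const volatile struct example_usage_record *rec,
				    struct example_usage_record *out)
{
	int i = 0;

	do {
		out->last_in = rec->last_in;
		out->id = rec->id;
		out->total = rec->total;

		/* Accept the snapshot only if a re-read returns the same values. */
		if (rec->last_in == out->last_in &&
		    rec->id == out->id &&
		    rec->total == out->total)
			break;
	} while (++i < 6);	/* same bound as the driver: 6 attempts */
}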
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
struct intel_guc *guc = &engine->gt->uc.guc;
u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
__get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
stats->running = ctx_id != ~0U && last_switch;
if (stats->running)
__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
/*
* Instead of adjusting the total for overflow, just add the
* difference from previous sample stats->total_gt_clks
*/
if (total && total != ~0U) {
stats->total_gt_clks += (u32)(total - stats->prev_total);
stats->prev_total = total;
}
}
static u32 gpm_timestamp_shift(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
u32 reg, shift;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
return 3 - shift;
}
static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
struct intel_gt *gt = guc_to_gt(guc);
u32 gt_stamp_lo, gt_stamp_hi;
u64 gpm_ts;
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
gpm_ts = intel_uncore_read64_2x32(gt->uncore, MISC_STATUS0,
MISC_STATUS1) >> guc->timestamp.shift;
gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
gt_stamp_hi++;
guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}
/*
* Unlike the execlist mode of submission, total and active times are in terms of
* gt clocks. The *now parameter is retained to return the cpu time at which the
* busyness was sampled.
*/
static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
{
struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
struct intel_gt *gt = engine->gt;
struct intel_guc *guc = >->uc.guc;
u64 total, gt_stamp_saved;
unsigned long flags;
u32 reset_count;
bool in_reset;
spin_lock_irqsave(&guc->timestamp.lock, flags);
/*
* If a reset happened, we risk reading partially updated engine
* busyness from GuC, so we just use the driver stored copy of busyness.
* Synchronize with gt reset using reset_count and the
* I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count
* after I915_RESET_BACKOFF flag, so ensure that the reset_count is
* usable by checking the flag afterwards.
*/
reset_count = i915_reset_count(gpu_error);
in_reset = test_bit(I915_RESET_BACKOFF, >->reset.flags);
*now = ktime_get();
/*
* The active busyness depends on start_gt_clk and gt_stamp.
* gt_stamp is updated by i915 only when gt is awake and the
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
/*
* Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
* start_gt_clk' calculation below for active engines.
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
guc->timestamp.gt_stamp = gt_stamp_saved;
}
}
total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks);
if (stats->running) {
u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
total += intel_gt_clock_interval_to_ns(gt, clk);
}
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
return ns_to_ktime(total);
}
static void guc_enable_busyness_worker(struct intel_guc *guc)
{
mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay);
}
static void guc_cancel_busyness_worker(struct intel_guc *guc)
{
cancel_delayed_work_sync(&guc->timestamp.work);
}
static void __reset_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long flags;
ktime_t unused;
guc_cancel_busyness_worker(guc);
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
guc_update_engine_gt_clks(engine);
engine->stats.guc.prev_total = 0;
}
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned long flags;
ktime_t unused;
guc->timestamp.last_stat_jiffies = jiffies;
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id)
guc_update_engine_gt_clks(engine);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
static void __guc_context_update_stats(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
spin_lock_irqsave(&guc->timestamp.lock, flags);
lrc_update_runtime(ce);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
static void guc_context_update_stats(struct intel_context *ce)
{
if (!intel_context_pin_if_active(ce))
return;
__guc_context_update_stats(ce);
intel_context_unpin(ce);
}
static void guc_timestamp_ping(struct work_struct *wrk)
{
struct intel_guc *guc = container_of(wrk, typeof(*guc),
timestamp.work.work);
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
struct intel_gt *gt = guc_to_gt(guc);
struct intel_context *ce;
intel_wakeref_t wakeref;
unsigned long index;
int srcu, ret;
/*
* Synchronize with gt reset to make sure the worker does not
* corrupt the engine/guc stats. NB: can't actually block waiting
* for a reset to complete as the reset requires flushing out
* this worker thread if started. So waiting would deadlock.
*/
ret = intel_gt_reset_trylock(gt, &srcu);
if (ret)
return;
with_intel_runtime_pm(>->i915->runtime_pm, wakeref)
__update_guc_busyness_stats(guc);
/* adjust context stats for overflow */
xa_for_each(&guc->context_lookup, index, ce)
guc_context_update_stats(ce);
intel_gt_reset_unlock(gt, srcu);
guc_enable_busyness_worker(guc);
}
static int guc_action_enable_usage_stats(struct intel_guc *guc)
{
u32 offset = intel_guc_engine_usage_offset(guc);
u32 action[] = {
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
offset,
0,
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static int guc_init_engine_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
int ret;
with_intel_runtime_pm(>->i915->runtime_pm, wakeref)
ret = guc_action_enable_usage_stats(guc);
if (ret)
guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
else
guc_enable_busyness_worker(guc);
return ret;
}
static void guc_fini_engine_stats(struct intel_guc *guc)
{
guc_cancel_busyness_worker(guc);
}
void intel_guc_busyness_park(struct intel_gt *gt)
{
struct intel_guc *guc = >->uc.guc;
if (!guc_submission_initialized(guc))
return;
/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker
* synchronously here.
*/
guc_cancel_busyness_worker(guc);
/*
* Before parking, we should sample engine busyness stats if we need to.
* We can skip it if we are less than half a ping from the last time we
* sampled the busyness stats.
*/
if (guc->timestamp.last_stat_jiffies &&
!time_after(jiffies, guc->timestamp.last_stat_jiffies +
(guc->timestamp.ping_delay / 2)))
return;
__update_guc_busyness_stats(guc);
}
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
struct intel_guc *guc = >->uc.guc;
unsigned long flags;
ktime_t unused;
if (!guc_submission_initialized(guc))
return;
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
guc_enable_busyness_worker(guc);
}
static inline bool
submission_disabled(struct intel_guc *guc)
{
struct i915_sched_engine * const sched_engine = guc->sched_engine;
return unlikely(!sched_engine ||
!__tasklet_is_enabled(&sched_engine->tasklet) ||
intel_gt_is_wedged(guc_to_gt(guc)));
}
static void disable_submission(struct intel_guc *guc)
{
struct i915_sched_engine * const sched_engine = guc->sched_engine;
if (__tasklet_is_enabled(&sched_engine->tasklet)) {
GEM_BUG_ON(!guc->ct.enabled);
__tasklet_disable_sync_once(&sched_engine->tasklet);
sched_engine->tasklet.callback = NULL;
}
}
static void enable_submission(struct intel_guc *guc)
{
struct i915_sched_engine * const sched_engine = guc->sched_engine;
unsigned long flags;
spin_lock_irqsave(&guc->sched_engine->lock, flags);
sched_engine->tasklet.callback = guc_submission_tasklet;
wmb(); /* Make sure callback visible */
if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
__tasklet_enable(&sched_engine->tasklet)) {
GEM_BUG_ON(!guc->ct.enabled);
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&sched_engine->tasklet);
}
spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
}
static void guc_flush_submissions(struct intel_guc *guc)
{
struct i915_sched_engine * const sched_engine = guc->sched_engine;
unsigned long flags;
spin_lock_irqsave(&sched_engine->lock, flags);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
static void guc_flush_destroyed_contexts(struct intel_guc *guc);
void intel_guc_submission_reset_prepare(struct intel_guc *guc)
{
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
}
intel_gt_park_heartbeats(guc_to_gt(guc));
disable_submission(guc);
guc->interrupts.disable(guc);
__reset_guc_busyness_stats(guc);
/* Flush IRQ handler */
spin_lock_irq(guc_to_gt(guc)->irq_lock);
spin_unlock_irq(guc_to_gt(guc)->irq_lock);
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
flush_work(&guc->ct.requests.worker);
scrub_guc_desc_for_outstanding_g2h(guc);
}
static struct intel_engine_cs *
guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp, mask = ve->mask;
unsigned int num_siblings = 0;
for_each_engine_masked(engine, ve->gt, mask, tmp)
if (num_siblings++ == sibling)
return engine;
return NULL;
}
static inline struct intel_engine_cs *
__context_to_physical_engine(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
if (intel_engine_is_virtual(engine))
engine = guc_virtual_get_sibling(engine, 0);
return engine;
}
static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
{
struct intel_engine_cs *engine = __context_to_physical_engine(ce);
if (!intel_context_is_schedulable(ce))
return;
GEM_BUG_ON(!intel_context_is_pinned(ce));
/*
* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
* so clear it and rebuild just what we need for the breadcrumb.
* All pending requests for this context will be zapped, and any
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
if (scrub)
lrc_init_regs(ce, engine, true);
/* Rerun the request; its payload has been neutered (if guilty). */
lrc_update_regs(ce, engine, head);
}
static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
{
/*
* Wa_22011802037: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
(GRAPHICS_VER(engine->i915) >= 11 &&
GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70))) {
intel_engine_stop_cs(engine);
intel_engine_wait_for_pending_mi_fw(engine);
}
}
static void guc_reset_nop(struct intel_engine_cs *engine)
{
}
static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
{
}
static void
__unwind_incomplete_requests(struct intel_context *ce)
{
struct i915_request *rq, *rn;
struct list_head *pl;
int prio = I915_PRIORITY_INVALID;
struct i915_sched_engine * const sched_engine =
ce->engine->sched_engine;
unsigned long flags;
spin_lock_irqsave(&sched_engine->lock, flags);
spin_lock(&ce->guc_state.lock);
list_for_each_entry_safe_reverse(rq, rn,
&ce->guc_state.requests,
sched.link) {
if (i915_request_completed(rq))
continue;
list_del_init(&rq->sched.link);
__i915_request_unsubmit(rq);
/* Push the request back into the queue for later resubmission. */
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
pl = i915_sched_lookup_priolist(sched_engine, prio);
}
GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
list_add(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
{
bool guilty;
struct i915_request *rq;
unsigned long flags;
u32 head;
int i, number_children = ce->parallel.number_children;
struct intel_context *parent = ce;
GEM_BUG_ON(intel_context_is_child(ce));
intel_context_get(ce);
/*
* GuC will implicitly mark the context as non-schedulable when it sends
* the reset notification. Make sure our state reflects this change. The
* context will be marked enabled on resubmission.
*/
spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* For each context in the relationship find the hanging request
* resetting each context / request as needed
*/
for (i = 0; i < number_children + 1; ++i) {
if (!intel_context_is_pinned(ce))
goto next_context;
guilty = false;
rq = intel_context_get_active_request(ce);
if (!rq) {
head = ce->ring->tail;
goto out_replay;
}
if (i915_request_started(rq))
guilty = stalled & ce->engine->mask;
GEM_BUG_ON(i915_active_is_idle(&ce->active));
head = intel_ring_wrap(ce->ring, rq->head);
__i915_request_reset(rq, guilty);
i915_request_put(rq);
out_replay:
guc_reset_state(ce, head, guilty);
next_context:
if (i != number_children)
ce = list_next_entry(ce, parallel.child_link);
}
__unwind_incomplete_requests(parent);
intel_context_put(parent);
}
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled)
{
struct intel_context *ce;
unsigned long index;
unsigned long flags;
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
}
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
if (!kref_get_unless_zero(&ce->ref))
continue;
xa_unlock(&guc->context_lookup);
if (intel_context_is_pinned(ce) &&
!intel_context_is_child(ce))
__guc_reset_context(ce, stalled);
intel_context_put(ce);
xa_lock(&guc->context_lookup);
}
xa_unlock_irqrestore(&guc->context_lookup, flags);
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
}
static void guc_cancel_context_requests(struct intel_context *ce)
{
struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
struct i915_request *rq;
unsigned long flags;
/* Mark all executing requests as skipped. */
spin_lock_irqsave(&sched_engine->lock, flags);
spin_lock(&ce->guc_state.lock);
list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
static void
guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
{
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
/* Can be called during boot if GuC fails to load */
if (!sched_engine)
return;
/*
* Before we call engine->cancel_requests(), we should have exclusive
* access to the submission state. This is arranged for us by the
* caller disabling the interrupt generation, the tasklet and other
* threads that may then access the same state, giving us a free hand
* to reset state. However, we still need to let lockdep be aware that
* we know this state may be accessed in hardirq context, so we
* disable the irq around this manipulation and we want to keep
* the spinlock focused on its duties and not accidentally conflate
* coverage to the submission's irq state. (Similarly, although we
* shouldn't need to disable irq around the manipulation of the
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
spin_lock_irqsave(&sched_engine->lock, flags);
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&sched_engine->queue))) {
struct i915_priolist *p = to_priolist(rb);
priolist_for_each_request_consume(rq, rn, p) {
list_del_init(&rq->sched.link);
__i915_request_submit(rq);
i915_request_put(i915_request_mark_eio(rq));
}
rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
sched_engine->queue_priority_hint = INT_MIN;
sched_engine->queue = RB_ROOT_CACHED;
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
void intel_guc_submission_cancel_requests(struct intel_guc *guc)
{
struct intel_context *ce;
unsigned long index;
unsigned long flags;
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
if (!kref_get_unless_zero(&ce->ref))
continue;
xa_unlock(&guc->context_lookup);
if (intel_context_is_pinned(ce) &&
!intel_context_is_child(ce))
guc_cancel_context_requests(ce);
intel_context_put(ce);
xa_lock(&guc->context_lookup);
}
xa_unlock_irqrestore(&guc->context_lookup, flags);
guc_cancel_sched_engine_requests(guc->sched_engine);
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
}
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
intel_gt_is_wedged(guc_to_gt(guc)))) {
return;
}
/*
* Technically possible for either of these values to be non-zero here,
* but very unlikely + harmless. Regardless let's add a warn so we can
* see in CI if this happens frequently / a precursor to taking down the
* machine.
*/
GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
atomic_set(&guc->outstanding_submission_g2h, 0);
intel_guc_global_policies_update(guc);
enable_submission(guc);
intel_gt_unpark_heartbeats(guc_to_gt(guc));
}
static void destroyed_worker_func(struct work_struct *w);
static void reset_fail_worker_func(struct work_struct *w);
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
int intel_guc_submission_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
int ret;
if (guc->submission_initialized)
return 0;
if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) {
ret = guc_lrc_desc_pool_create_v69(guc);
if (ret)
return ret;
}
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap) {
ret = -ENOMEM;
goto destroy_pool;
}
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
guc->timestamp.shift = gpm_timestamp_shift(gt);
guc->submission_initialized = true;
return 0;
destroy_pool:
guc_lrc_desc_pool_destroy_v69(guc);
return ret;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
if (!guc->submission_initialized)
return;
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
guc->submission_initialized = false;
}
static inline void queue_request(struct i915_sched_engine *sched_engine,
struct i915_request *rq,
int prio)
{
GEM_BUG_ON(!list_empty(&rq->sched.link));
list_add_tail(&rq->sched.link,
i915_sched_lookup_priolist(sched_engine, prio));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
tasklet_hi_schedule(&sched_engine->tasklet);
}
static int guc_bypass_tasklet_submit(struct intel_guc *guc,
struct i915_request *rq)
{
int ret = 0;
__i915_request_submit(rq);
trace_i915_request_in(rq, 0);
if (is_multi_lrc_rq(rq)) {
if (multi_lrc_submit(rq)) {
ret = guc_wq_item_append(guc, rq);
if (!ret)
ret = guc_add_request(guc, rq);
}
} else {
guc_set_lrc_tail(rq);
ret = guc_add_request(guc, rq);
}
if (unlikely(ret == -EPIPE))
disable_submission(guc);
return ret;
}
static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
struct intel_context *ce = request_to_scheduling_context(rq);
return submission_disabled(guc) || guc->stalled_request ||
!i915_sched_engine_is_empty(sched_engine) ||
!ctx_id_mapped(guc, ce->guc_id.id);
}
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
struct intel_guc *guc = &rq->engine->gt->uc.guc;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&sched_engine->lock, flags);
if (need_tasklet(guc, rq))
queue_request(sched_engine, rq, rq_prio(rq));
else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
tasklet_hi_schedule(&sched_engine->tasklet);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
int ret;
GEM_BUG_ON(intel_context_is_child(ce));
if (intel_context_is_parent(ce))
ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
NUMBER_MULTI_LRC_GUC_ID(guc),
order_base_2(ce->parallel.number_children
+ 1));
else
ret = ida_simple_get(&guc->submission_state.guc_ids,
NUMBER_MULTI_LRC_GUC_ID(guc),
guc->submission_state.num_guc_ids,
GFP_KERNEL | __GFP_RETRY_MAYFAIL |
__GFP_NOWARN);
if (unlikely(ret < 0))
return ret;
if (!intel_context_is_parent(ce))
++guc->submission_state.guc_ids_in_use;
ce->guc_id.id = ret;
return 0;
}
static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
GEM_BUG_ON(intel_context_is_child(ce));
if (!context_guc_id_invalid(ce)) {
if (intel_context_is_parent(ce)) {
bitmap_release_region(guc->submission_state.guc_ids_bitmap,
ce->guc_id.id,
order_base_2(ce->parallel.number_children
+ 1));
} else {
--guc->submission_state.guc_ids_in_use;
ida_simple_remove(&guc->submission_state.guc_ids,
ce->guc_id.id);
}
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
}
if (!list_empty(&ce->guc_id.link))
list_del_init(&ce->guc_id.link);
}
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
unsigned long flags;
spin_lock_irqsave(&guc->submission_state.lock, flags);
__release_guc_id(guc, ce);
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
struct intel_context *cn;
lockdep_assert_held(&guc->submission_state.lock);
GEM_BUG_ON(intel_context_is_child(ce));
GEM_BUG_ON(intel_context_is_parent(ce));
if (!list_empty(&guc->submission_state.guc_id_list)) {
cn = list_first_entry(&guc->submission_state.guc_id_list,
struct intel_context,
guc_id.link);
GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(cn));
GEM_BUG_ON(intel_context_is_child(cn));
GEM_BUG_ON(intel_context_is_parent(cn));
list_del_init(&cn->guc_id.link);
ce->guc_id.id = cn->guc_id.id;
spin_lock(&cn->guc_state.lock);
clr_context_registered(cn);
spin_unlock(&cn->guc_state.lock);
set_context_guc_id_invalid(cn);
#ifdef CONFIG_DRM_I915_SELFTEST
guc->number_guc_id_stolen++;
#endif
return 0;
} else {
return -EAGAIN;
}
}
static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
int ret;
lockdep_assert_held(&guc->submission_state.lock);
GEM_BUG_ON(intel_context_is_child(ce));
ret = new_guc_id(guc, ce);
if (unlikely(ret < 0)) {
if (intel_context_is_parent(ce))
return -ENOSPC;
ret = steal_guc_id(guc, ce);
if (ret < 0)
return ret;
}
if (intel_context_is_parent(ce)) {
struct intel_context *child;
int i = 1;
for_each_child(ce, child)
child->guc_id.id = ce->guc_id.id + i++;
}
return 0;
}
#define PIN_GUC_ID_TRIES 4
static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
int ret = 0;
unsigned long flags, tries = PIN_GUC_ID_TRIES;
GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
try_again:
spin_lock_irqsave(&guc->submission_state.lock, flags);
might_lock(&ce->guc_state.lock);
if (context_guc_id_invalid(ce)) {
ret = assign_guc_id(guc, ce);
if (ret)
goto out_unlock;
ret = 1; /* Indicates a newly assigned guc_id */
}
if (!list_empty(&ce->guc_id.link))
list_del_init(&ce->guc_id.link);
atomic_inc(&ce->guc_id.ref);
out_unlock:
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
/*
* -EAGAIN indicates no guc_ids are available, so let's retire any
* outstanding requests to see if that frees up a guc_id. If the first
* retire didn't help, insert a sleep with the timeslice duration before
* attempting to retire more requests. Double the sleep period each
* subsequent pass before finally giving up. The sleep period has max of
* 100ms and minimum of 1ms.
*/
if (ret == -EAGAIN && --tries) {
if (PIN_GUC_ID_TRIES - tries > 1) {
unsigned int timeslice_shifted =
ce->engine->props.timeslice_duration_ms <<
(PIN_GUC_ID_TRIES - tries - 2);
unsigned int max = min_t(unsigned int, 100,
timeslice_shifted);
msleep(max_t(unsigned int, max, 1));
}
intel_gt_retire_requests(guc_to_gt(guc));
goto try_again;
}
return ret;
}
static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
unsigned long flags;
GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
GEM_BUG_ON(intel_context_is_child(ce));
if (unlikely(context_guc_id_invalid(ce) ||
intel_context_is_parent(ce)))
return;
spin_lock_irqsave(&guc->submission_state.lock, flags);
if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
!atomic_read(&ce->guc_id.ref))
list_add_tail(&ce->guc_id.link,
&guc->submission_state.guc_id_list);
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
struct intel_context *ce,
u32 guc_id,
u32 offset,
bool loop)
{
struct intel_context *child;
u32 action[4 + MAX_ENGINE_INSTANCE];
int len = 0;
GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = guc_id;
action[len++] = ce->parallel.number_children + 1;
action[len++] = offset;
for_each_child(ce, child) {
offset += sizeof(struct guc_lrc_desc_v69);
action[len++] = offset;
}
return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
struct intel_context *ce,
struct guc_ctxt_registration_info *info,
bool loop)
{
struct intel_context *child;
u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
int len = 0;
u32 next_id;
GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = info->flags;
action[len++] = info->context_idx;
action[len++] = info->engine_class;
action[len++] = info->engine_submit_mask;
action[len++] = info->wq_desc_lo;
action[len++] = info->wq_desc_hi;
action[len++] = info->wq_base_lo;
action[len++] = info->wq_base_hi;
action[len++] = info->wq_size;
action[len++] = ce->parallel.number_children + 1;
action[len++] = info->hwlrca_lo;
action[len++] = info->hwlrca_hi;
next_id = info->context_idx + 1;
for_each_child(ce, child) {
GEM_BUG_ON(next_id++ != child->guc_id.id);
/*
* NB: GuC interface supports 64 bit LRCA even though i915/HW
* only supports 32 bit currently.
*/
action[len++] = lower_32_bits(child->lrc.lrca);
action[len++] = upper_32_bits(child->lrc.lrca);
}
GEM_BUG_ON(len > ARRAY_SIZE(action));
return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
static int __guc_action_register_context_v69(struct intel_guc *guc,
u32 guc_id,
u32 offset,
bool loop)
{
u32 action[] = {
INTEL_GUC_ACTION_REGISTER_CONTEXT,
guc_id,
offset,
};
return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
0, loop);
}
static int __guc_action_register_context_v70(struct intel_guc *guc,
struct guc_ctxt_registration_info *info,
bool loop)
{
u32 action[] = {
INTEL_GUC_ACTION_REGISTER_CONTEXT,
info->flags,
info->context_idx,
info->engine_class,
info->engine_submit_mask,
info->wq_desc_lo,
info->wq_desc_hi,
info->wq_base_lo,
info->wq_base_hi,
info->wq_size,
info->hwlrca_lo,
info->hwlrca_hi,
};
return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
0, loop);
}
static void prepare_context_registration_info_v69(struct intel_context *ce);
static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info);
static int
register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
{
u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
prepare_context_registration_info_v69(ce);
if (intel_context_is_parent(ce))
return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
offset, loop);
else
return __guc_action_register_context_v69(guc, ce->guc_id.id,
offset, loop);
}
static int
register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
{
struct guc_ctxt_registration_info info;
prepare_context_registration_info_v70(ce, &info);
if (intel_context_is_parent(ce))
return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
else
return __guc_action_register_context_v70(guc, &info, loop);
}
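/*
 * Register a context with the GuC, using the v70+ (GuC 1.0.0) interface when
 * available and falling back to the legacy v69 LRC descriptor pool otherwise.
 * Multi-LRC (parent) contexts use the dedicated multi-LRC registration action.
 */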
static int register_context(struct intel_context *ce, bool loop)
{
struct intel_guc *guc = ce_to_guc(ce);
int ret;
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
ret = register_context_v70(guc, ce, loop);
else
ret = register_context_v69(guc, ce, loop);
if (likely(!ret)) {
unsigned long flags;
spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
guc_context_policy_init_v70(ce, loop);
}
return ret;
}
static int __guc_action_deregister_context(struct intel_guc *guc,
u32 guc_id)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
guc_id,
};
return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
G2H_LEN_DW_DEREGISTER_CONTEXT,
true);
}
static int deregister_context(struct intel_context *ce, u32 guc_id)
{
struct intel_guc *guc = ce_to_guc(ce);
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_deregister(ce);
return __guc_action_deregister_context(guc, guc_id);
}
static inline void clear_children_join_go_memory(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
int i;
ps->go.semaphore = 0;
for (i = 0; i < ce->parallel.number_children + 1; ++i)
ps->join[i].semaphore = 0;
}
static inline u32 get_children_go_value(struct intel_context *ce)
{
return __get_parent_scratch(ce)->go.semaphore;
}
static inline u32 get_children_join_value(struct intel_context *ce,
u8 child_index)
{
return __get_parent_scratch(ce)->join[child_index].semaphore;
}
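/*
 * Per-context scheduling policies (execution quantum, preemption timeout,
 * priority, preempt-to-idle) are sent to the GuC as a KLV list via the
 * UPDATE_CONTEXT_POLICIES H2G; struct context_policy accumulates the KLVs
 * before the message is sent.
 */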
struct context_policy {
u32 count;
struct guc_update_context_policy h2g;
};
static u32 __guc_context_policy_action_size(struct context_policy *policy)
{
size_t bytes = sizeof(policy->h2g.header) +
(sizeof(policy->h2g.klv[0]) * policy->count);
return bytes / sizeof(u32);
}
static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
{
policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
policy->h2g.header.ctx_id = guc_id;
policy->count = 0;
}
#define MAKE_CONTEXT_POLICY_ADD(func, id) \
static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \
{ \
GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
policy->h2g.klv[policy->count].kl = \
FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
FIELD_PREP(GUC_KLV_0_LEN, 1); \
policy->h2g.klv[policy->count].value = data; \
policy->count++; \
}
MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
#undef MAKE_CONTEXT_POLICY_ADD
static int __guc_context_set_context_policies(struct intel_guc *guc,
struct context_policy *policy,
bool loop)
{
return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g,
__guc_context_policy_action_size(policy),
0, loop);
}
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
unsigned long flags;
int ret;
/* NB: For both of these, zero means disabled. */
GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
execution_quantum));
GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
preemption_timeout));
execution_quantum = engine->props.timeslice_duration_ms * 1000;
preemption_timeout = engine->props.preempt_timeout_ms * 1000;
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
ret = __guc_context_set_context_policies(guc, &policy, loop);
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (ret != 0)
set_context_policy_required(ce);
else
clr_context_policy_required(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return ret;
}
static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
struct guc_lrc_desc_v69 *desc)
{
desc->policy_flags = 0;
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
/* NB: For both of these, zero means disabled. */
GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
desc->execution_quantum));
GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
desc->preemption_timeout));
desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
}
static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
{
/*
* this matches the mapping we do in map_i915_prio_to_guc_prio()
* (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
*/
switch (prio) {
default:
MISSING_CASE(prio);
fallthrough;
case GUC_CLIENT_PRIORITY_KMD_NORMAL:
return GEN12_CTX_PRIORITY_NORMAL;
case GUC_CLIENT_PRIORITY_NORMAL:
return GEN12_CTX_PRIORITY_LOW;
case GUC_CLIENT_PRIORITY_HIGH:
case GUC_CLIENT_PRIORITY_KMD_HIGH:
return GEN12_CTX_PRIORITY_HIGH;
}
}
static void prepare_context_registration_info_v69(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
u32 ctx_id = ce->guc_id.id;
struct guc_lrc_desc_v69 *desc;
struct intel_context *child;
GEM_BUG_ON(!engine->mask);
/*
* Ensure the LRC and CT vmas are in the same region, as the write
* barrier is done based on the CT vma region.
*/
GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
i915_gem_object_is_lmem(ce->ring->vma->obj));
desc = __get_lrc_desc_v69(guc, ctx_id);
GEM_BUG_ON(!desc);
desc->engine_class = engine_class_to_guc_class(engine->class);
desc->engine_submit_mask = engine->logical_mask;
desc->hw_context_desc = ce->lrc.lrca;
desc->priority = ce->guc_state.prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init_v69(engine, desc);
/*
* If context is a parent, we need to register a process descriptor
* describing a work queue and register all child contexts.
*/
if (intel_context_is_parent(ce)) {
struct guc_process_desc_v69 *pdesc;
ce->parallel.guc.wqi_tail = 0;
ce->parallel.guc.wqi_head = 0;
desc->process_desc = i915_ggtt_offset(ce->state) +
__get_parent_scratch_offset(ce);
desc->wq_addr = i915_ggtt_offset(ce->state) +
__get_wq_offset(ce);
desc->wq_size = WQ_SIZE;
pdesc = __get_process_desc_v69(ce);
memset(pdesc, 0, sizeof(*(pdesc)));
pdesc->stage_id = ce->guc_id.id;
pdesc->wq_base_addr = desc->wq_addr;
pdesc->wq_size_bytes = desc->wq_size;
pdesc->wq_status = WQ_STATUS_ACTIVE;
ce->parallel.guc.wq_head = &pdesc->head;
ce->parallel.guc.wq_tail = &pdesc->tail;
ce->parallel.guc.wq_status = &pdesc->wq_status;
for_each_child(ce, child) {
desc = __get_lrc_desc_v69(guc, child->guc_id.id);
desc->engine_class =
engine_class_to_guc_class(engine->class);
desc->hw_context_desc = child->lrc.lrca;
desc->priority = ce->guc_state.prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init_v69(engine, desc);
}
clear_children_join_go_memory(ce);
}
}
static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_guc *guc = &engine->gt->uc.guc;
u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
/*
* Ensure the LRC and CT vmas are in the same region, as the write
* barrier is done based on the CT vma region.
*/
GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
i915_gem_object_is_lmem(ce->ring->vma->obj));
memset(info, 0, sizeof(*info));
info->context_idx = ctx_id;
info->engine_class = engine_class_to_guc_class(engine->class);
info->engine_submit_mask = engine->logical_mask;
/*
* NB: GuC interface supports 64 bit LRCA even though i915/HW
* only supports 32 bit currently.
*/
info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
/*
* If context is a parent, we need to register a process descriptor
* describing a work queue and register all child contexts.
*/
if (intel_context_is_parent(ce)) {
struct guc_sched_wq_desc *wq_desc;
u64 wq_desc_offset, wq_base_offset;
ce->parallel.guc.wqi_tail = 0;
ce->parallel.guc.wqi_head = 0;
wq_desc_offset = i915_ggtt_offset(ce->state) +
__get_parent_scratch_offset(ce);
wq_base_offset = i915_ggtt_offset(ce->state) +
__get_wq_offset(ce);
info->wq_desc_lo = lower_32_bits(wq_desc_offset);
info->wq_desc_hi = upper_32_bits(wq_desc_offset);
info->wq_base_lo = lower_32_bits(wq_base_offset);
info->wq_base_hi = upper_32_bits(wq_base_offset);
info->wq_size = WQ_SIZE;
wq_desc = __get_wq_desc_v70(ce);
memset(wq_desc, 0, sizeof(*wq_desc));
wq_desc->wq_status = WQ_STATUS_ACTIVE;
ce->parallel.guc.wq_head = &wq_desc->head;
ce->parallel.guc.wq_tail = &wq_desc->tail;
ce->parallel.guc.wq_status = &wq_desc->wq_status;
clear_children_join_go_memory(ce);
}
}
static int try_context_registration(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
struct intel_guc *guc = &engine->gt->uc.guc;
intel_wakeref_t wakeref;
u32 ctx_id = ce->guc_id.id;
bool context_registered;
int ret = 0;
GEM_BUG_ON(!sched_state_is_init(ce));
context_registered = ctx_id_mapped(guc, ctx_id);
clr_ctx_id_mapping(guc, ctx_id);
set_ctx_id_mapping(guc, ctx_id, ce);
/*
* The context_lookup xarray is used to determine if the hardware
* context is currently registered. There are two cases in which it
* could be registered: either the guc_id has been stolen from another
* context or the LRC descriptor address of this context has changed. In
* either case the context needs to be deregistered with the GuC before
* registering this context.
*/
if (context_registered) {
bool disabled;
unsigned long flags;
trace_intel_context_steal_guc_id(ce);
GEM_BUG_ON(!loop);
/* Seal race with Reset */
spin_lock_irqsave(&ce->guc_state.lock, flags);
disabled = submission_disabled(guc);
if (likely(!disabled)) {
set_context_wait_for_deregister_to_register(ce);
intel_context_get(ce);
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (unlikely(disabled)) {
clr_ctx_id_mapping(guc, ctx_id);
return 0; /* Will get registered later */
}
/*
* If stealing the guc_id, this ce has the same guc_id as the
* context whose guc_id was stolen.
*/
with_intel_runtime_pm(runtime_pm, wakeref)
ret = deregister_context(ce, ce->guc_id.id);
if (unlikely(ret == -ENODEV))
ret = 0; /* Will get registered later */
} else {
with_intel_runtime_pm(runtime_pm, wakeref)
ret = register_context(ce, loop);
if (unlikely(ret == -EBUSY)) {
clr_ctx_id_mapping(guc, ctx_id);
} else if (unlikely(ret == -ENODEV)) {
clr_ctx_id_mapping(guc, ctx_id);
ret = 0; /* Will get registered later */
}
}
return ret;
}
static int __guc_context_pre_pin(struct intel_context *ce,
struct intel_engine_cs *engine,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
return lrc_pre_pin(ce, engine, ww, vaddr);
}
static int __guc_context_pin(struct intel_context *ce,
struct intel_engine_cs *engine,
void *vaddr)
{
if (i915_ggtt_offset(ce->state) !=
(ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
/*
* GuC context gets pinned in guc_request_alloc. See that function for
* an explanation of why.
*/
return lrc_pin(ce, engine, vaddr);
}
static int guc_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
}
static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
int ret = __guc_context_pin(ce, ce->engine, vaddr);
if (likely(!ret && !intel_context_is_barrier(ce)))
intel_engine_pm_get(ce->engine);
return ret;
}
static void guc_context_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
__guc_context_update_stats(ce);
unpin_guc_id(guc, ce);
lrc_unpin(ce);
if (likely(!intel_context_is_barrier(ce)))
intel_engine_pm_put_async(ce->engine);
}
static void guc_context_post_unpin(struct intel_context *ce)
{
lrc_post_unpin(ce);
}
static void __guc_context_sched_enable(struct intel_guc *guc,
struct intel_context *ce)
{
u32 action[] = {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
ce->guc_id.id,
GUC_CONTEXT_ENABLE
};
trace_intel_context_sched_enable(ce);
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
}
static void __guc_context_sched_disable(struct intel_guc *guc,
struct intel_context *ce,
u16 guc_id)
{
u32 action[] = {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
guc_id, /* ce->guc_id.id not stable */
GUC_CONTEXT_DISABLE
};
GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID);
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_sched_disable(ce);
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
}
static void guc_blocked_fence_complete(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
if (!i915_sw_fence_done(&ce->guc_state.blocked))
i915_sw_fence_complete(&ce->guc_state.blocked);
}
static void guc_blocked_fence_reinit(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
/*
* This fence is always complete unless a pending schedule disable is
* outstanding. We arm the fence here and complete it when we receive
* the pending schedule disable complete message.
*/
i915_sw_fence_fini(&ce->guc_state.blocked);
i915_sw_fence_reinit(&ce->guc_state.blocked);
i915_sw_fence_await(&ce->guc_state.blocked);
i915_sw_fence_commit(&ce->guc_state.blocked);
}
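/*
 * Prepare the context for a schedule disable H2G: mark the disable as
 * pending, re-arm the blocked fence and take a reference which is dropped
 * when the G2H completion arrives. Returns the guc_id to use for the H2G,
 * sampled under the lock as ce->guc_id.id is not stable once it is released.
 */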
static u16 prep_context_pending_disable(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
set_context_pending_disable(ce);
clr_context_enabled(ce);
guc_blocked_fence_reinit(ce);
intel_context_get(ce);
return ce->guc_id.id;
}
static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
intel_wakeref_t wakeref;
u16 guc_id;
bool enabled;
GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irqsave(&ce->guc_state.lock, flags);
incr_context_blocked(ce);
enabled = context_enabled(ce);
if (unlikely(!enabled || submission_disabled(guc))) {
if (enabled)
clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return &ce->guc_state.blocked;
}
/*
* We add +2 here as the schedule disable complete CTB handler calls
* intel_context_sched_disable_unpin (-2 to pin_count).
*/
atomic_add(2, &ce->pin_count);
guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_sched_disable(guc, ce, guc_id);
return &ce->guc_state.blocked;
}
#define SCHED_STATE_MULTI_BLOCKED_MASK \
(SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
#define SCHED_STATE_NO_UNBLOCK \
(SCHED_STATE_MULTI_BLOCKED_MASK | \
SCHED_STATE_PENDING_DISABLE | \
SCHED_STATE_BANNED)
static bool context_cant_unblock(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
context_guc_id_invalid(ce) ||
!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
!intel_context_is_pinned(ce);
}
static void guc_context_unblock(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
intel_wakeref_t wakeref;
bool enable;
GEM_BUG_ON(context_enabled(ce));
GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (unlikely(submission_disabled(guc) ||
context_cant_unblock(ce))) {
enable = false;
} else {
enable = true;
set_context_pending_enable(ce);
set_context_enabled(ce);
intel_context_get(ce);
}
decr_context_blocked(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (enable) {
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_sched_enable(guc, ce);
}
}
static void guc_context_cancel_request(struct intel_context *ce,
struct i915_request *rq)
{
struct intel_context *block_context =
request_to_scheduling_context(rq);
if (i915_sw_fence_signaled(&rq->submit)) {
struct i915_sw_fence *fence;
intel_context_get(ce);
fence = guc_context_block(block_context);
i915_sw_fence_wait(fence);
if (!i915_request_completed(rq)) {
__i915_request_skip(rq);
guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
true);
}
guc_context_unblock(block_context);
intel_context_put(ce);
}
}
static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, guc_id);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
__guc_context_set_context_policies(guc, &policy, true);
} else {
u32 action[] = {
INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
guc_id,
preemption_timeout
};
intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}
}
static void
guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
unsigned int preempt_timeout_ms)
{
struct intel_guc *guc = ce_to_guc(ce);
struct intel_runtime_pm *runtime_pm =
&ce->engine->gt->i915->runtime_pm;
intel_wakeref_t wakeref;
unsigned long flags;
GEM_BUG_ON(intel_context_is_child(ce));
guc_flush_submissions(guc);
spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_banned(ce);
if (submission_disabled(guc) ||
(!context_enabled(ce) && !context_pending_disable(ce))) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
guc_cancel_context_requests(ce);
intel_engine_signal_breadcrumbs(ce->engine);
} else if (!context_pending_disable(ce)) {
u16 guc_id;
/*
* We add +2 here as the schedule disable complete CTB handler
* calls intel_context_sched_disable_unpin (-2 to pin_count).
*/
atomic_add(2, &ce->pin_count);
guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* In addition to disabling scheduling, set the preemption
* timeout to the minimum value (1 us) so the banned context
* gets kicked off the HW ASAP.
*/
with_intel_runtime_pm(runtime_pm, wakeref) {
__guc_context_set_preemption_timeout(guc, guc_id,
preempt_timeout_ms);
__guc_context_sched_disable(guc, ce, guc_id);
}
} else {
if (!context_guc_id_invalid(ce))
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_set_preemption_timeout(guc,
ce->guc_id.id,
preempt_timeout_ms);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
}
static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
unsigned long flags)
__releases(ce->guc_state.lock)
{
struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
intel_wakeref_t wakeref;
u16 guc_id;
lockdep_assert_held(&ce->guc_state.lock);
guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_sched_disable(guc, ce, guc_id);
}
static bool bypass_sched_disable(struct intel_guc *guc,
struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(intel_context_is_child(ce));
if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
!ctx_id_mapped(guc, ce->guc_id.id)) {
clr_context_enabled(ce);
return true;
}
return !context_enabled(ce);
}
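/*
 * Delayed worker that issues the schedule disable once the
 * sched_disable_delay_ms grace period expires, unless the disable can be
 * bypassed (submission disabled, invalid guc_id, unregistered context or
 * scheduling already disabled).
 */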
static void __delay_sched_disable(struct work_struct *wrk)
{
struct intel_context *ce =
container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (bypass_sched_disable(guc, ce)) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
intel_context_sched_disable_unpin(ce);
} else {
do_sched_disable(guc, ce, flags);
}
}
static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
{
/*
* Parent contexts are perma-pinned; if we are unpinning, do the
* schedule disable immediately.
*/
if (intel_context_is_parent(ce))
return true;
/*
* If we are beyond the threshold of available guc_ids, do the schedule
* disable immediately.
*/
return guc->submission_state.guc_ids_in_use >
guc->submission_state.sched_disable_gucid_threshold;
}
static void guc_context_sched_disable(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
u64 delay = guc->submission_state.sched_disable_delay_ms;
unsigned long flags;
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (bypass_sched_disable(guc, ce)) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
intel_context_sched_disable_unpin(ce);
} else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
delay) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
mod_delayed_work(system_unbound_wq,
&ce->guc_state.sched_disable_delay_work,
msecs_to_jiffies(delay));
} else {
do_sched_disable(guc, ce, flags);
}
}
static void guc_context_close(struct intel_context *ce)
{
unsigned long flags;
if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))
__delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work);
spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_close_done(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
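/*
 * Final stage of context teardown: deregister the context with the GuC. A GT
 * PM reference is taken to cover the in-flight H2G/G2H; if submission is
 * already disabled (e.g. mid reset) the context is released immediately
 * instead.
 */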
static inline void guc_lrc_desc_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
struct intel_gt *gt = guc_to_gt(guc);
unsigned long flags;
bool disabled;
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
GEM_BUG_ON(context_enabled(ce));
/* Seal race with Reset */
spin_lock_irqsave(&ce->guc_state.lock, flags);
disabled = submission_disabled(guc);
if (likely(!disabled)) {
__intel_gt_pm_get(gt);
set_context_destroyed(ce);
clr_context_registered(ce);
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (unlikely(disabled)) {
release_guc_id(guc, ce);
__guc_context_destroy(ce);
return;
}
deregister_context(ce, ce->guc_id.id);
}
static void __guc_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
lrc_fini(ce);
intel_context_fini(ce);
if (intel_engine_is_virtual(ce->engine)) {
struct guc_virtual_engine *ve =
container_of(ce, typeof(*ve), context);
if (ve->base.breadcrumbs)
intel_breadcrumbs_put(ve->base.breadcrumbs);
kfree(ve);
} else {
intel_context_free(ce);
}
}
static void guc_flush_destroyed_contexts(struct intel_guc *guc)
{
struct intel_context *ce;
unsigned long flags;
GEM_BUG_ON(!submission_disabled(guc) &&
guc_submission_initialized(guc));
while (!list_empty(&guc->submission_state.destroyed_contexts)) {
spin_lock_irqsave(&guc->submission_state.lock, flags);
ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
struct intel_context,
destroyed_link);
if (ce)
list_del_init(&ce->destroyed_link);
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
if (!ce)
break;
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
}
static void deregister_destroyed_contexts(struct intel_guc *guc)
{
struct intel_context *ce;
unsigned long flags;
while (!list_empty(&guc->submission_state.destroyed_contexts)) {
spin_lock_irqsave(&guc->submission_state.lock, flags);
ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
struct intel_context,
destroyed_link);
if (ce)
list_del_init(&ce->destroyed_link);
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
if (!ce)
break;
guc_lrc_desc_unpin(ce);
}
}
static void destroyed_worker_func(struct work_struct *w)
{
struct intel_guc *guc = container_of(w, struct intel_guc,
submission_state.destroyed_worker);
struct intel_gt *gt = guc_to_gt(guc);
int tmp;
with_intel_gt_pm(gt, tmp)
deregister_destroyed_contexts(guc);
}
static void guc_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
bool destroy;
/*
* If the guc_id is invalid, this context has been stolen and we can free
* it immediately. It can also be freed immediately if the context is not
* registered with the GuC or the GuC is in the middle of a reset.
*/
spin_lock_irqsave(&guc->submission_state.lock, flags);
destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
!ctx_id_mapped(guc, ce->guc_id.id);
if (likely(!destroy)) {
if (!list_empty(&ce->guc_id.link))
list_del_init(&ce->guc_id.link);
list_add_tail(&ce->destroyed_link,
&guc->submission_state.destroyed_contexts);
} else {
__release_guc_id(guc, ce);
}
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
if (unlikely(destroy)) {
__guc_context_destroy(ce);
return;
}
/*
* We use a worker to issue the H2G to deregister the context as we can
* take the GT PM for the first time which isn't allowed from an atomic
* context.
*/
queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
}
static int guc_context_alloc(struct intel_context *ce)
{
return lrc_alloc(ce, ce->engine);
}
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_set_context_policies(guc, &policy, true);
} else {
u32 action[] = {
INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
ce->guc_id.id,
ce->guc_state.prio,
};
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}
}
static void guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce,
u8 prio)
{
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
prio > GUC_CLIENT_PRIORITY_NORMAL);
lockdep_assert_held(&ce->guc_state.lock);
if (ce->guc_state.prio == prio || submission_disabled(guc) ||
!context_registered(ce)) {
ce->guc_state.prio = prio;
return;
}
ce->guc_state.prio = prio;
__guc_context_set_prio(guc, ce);
trace_intel_context_set_prio(ce);
}
static inline u8 map_i915_prio_to_guc_prio(int prio)
{
if (prio == I915_PRIORITY_NORMAL)
return GUC_CLIENT_PRIORITY_KMD_NORMAL;
else if (prio < I915_PRIORITY_NORMAL)
return GUC_CLIENT_PRIORITY_NORMAL;
else if (prio < I915_PRIORITY_DISPLAY)
return GUC_CLIENT_PRIORITY_HIGH;
else
return GUC_CLIENT_PRIORITY_KMD_HIGH;
}
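/*
 * Each context tracks how many in-flight requests it has at each GuC
 * priority level; the context itself is always set to the highest
 * (numerically lowest) priority with a non-zero count, see
 * update_context_prio().
 */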
static inline void add_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
{
lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
++ce->guc_state.prio_count[guc_prio];
/* Overflow protection */
GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
}
static inline void sub_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
{
lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
/* Underflow protection */
GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
--ce->guc_state.prio_count[guc_prio];
}
static inline void update_context_prio(struct intel_context *ce)
{
struct intel_guc *guc = &ce->engine->gt->uc.guc;
int i;
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
lockdep_assert_held(&ce->guc_state.lock);
for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
if (ce->guc_state.prio_count[i]) {
guc_context_set_prio(guc, ce, i);
break;
}
}
}
static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
{
/* Lower value is higher priority */
return new_guc_prio < old_guc_prio;
}
static void add_to_context(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
GEM_BUG_ON(intel_context_is_child(ce));
GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
spin_lock(&ce->guc_state.lock);
list_move_tail(&rq->sched.link, &ce->guc_state.requests);
if (rq->guc_prio == GUC_PRIO_INIT) {
rq->guc_prio = new_guc_prio;
add_context_inflight_prio(ce, rq->guc_prio);
} else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
sub_context_inflight_prio(ce, rq->guc_prio);
rq->guc_prio = new_guc_prio;
add_context_inflight_prio(ce, rq->guc_prio);
}
update_context_prio(ce);
spin_unlock(&ce->guc_state.lock);
}
static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_INIT &&
rq->guc_prio != GUC_PRIO_FINI) {
sub_context_inflight_prio(ce, rq->guc_prio);
update_context_prio(ce);
}
rq->guc_prio = GUC_PRIO_FINI;
}
static void remove_from_context(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irq(&ce->guc_state.lock);
list_del_init(&rq->sched.link);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
/* Prevent further __await_execution() registering a cb, then flush */
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
guc_prio_fini(rq, ce);
spin_unlock_irq(&ce->guc_state.lock);
atomic_dec(&ce->guc_id.ref);
i915_request_notify_execute_cb_imm(rq);
}
static const struct intel_context_ops guc_context_ops = {
.flags = COPS_RUNTIME_CYCLES,
.alloc = guc_context_alloc,
.close = guc_context_close,
.pre_pin = guc_context_pre_pin,
.pin = guc_context_pin,
.unpin = guc_context_unpin,
.post_unpin = guc_context_post_unpin,
.revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
.enter = intel_context_enter_engine,
.exit = intel_context_exit_engine,
.sched_disable = guc_context_sched_disable,
.update_stats = guc_context_update_stats,
.reset = lrc_reset,
.destroy = guc_context_destroy,
.create_virtual = guc_create_virtual,
.create_parallel = guc_create_parallel,
};
static void submit_work_cb(struct irq_work *wrk)
{
struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
might_lock(&rq->engine->sched_engine->lock);
i915_sw_fence_complete(&rq->submit);
}
static void __guc_signal_context_fence(struct intel_context *ce)
{
struct i915_request *rq, *rn;
lockdep_assert_held(&ce->guc_state.lock);
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
/*
* Use irq_work to ensure the locking order of sched_engine->lock ->
* ce->guc_state.lock is preserved.
*/
list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
guc_fence_link) {
list_del(&rq->guc_fence_link);
irq_work_queue(&rq->submit_work);
}
INIT_LIST_HEAD(&ce->guc_state.fences);
}
static void guc_signal_context_fence(struct intel_context *ce)
{
unsigned long flags;
GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_wait_for_deregister_to_register(ce);
__guc_signal_context_fence(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
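/*
 * A context must be (re)registered with the GuC if it has just been assigned
 * a new guc_id, its LRC address has changed, or its id is no longer mapped in
 * the context_lookup xarray - unless submission is disabled altogether.
 */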
static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
{
return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
!submission_disabled(ce_to_guc(ce));
}
static void guc_context_init(struct intel_context *ce)
{
const struct i915_gem_context *ctx;
int prio = I915_CONTEXT_DEFAULT_PRIORITY;
rcu_read_lock();
ctx = rcu_dereference(ce->gem_context);
if (ctx)
prio = ctx->sched.priority;
rcu_read_unlock();
ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work,
__delay_sched_disable);
set_bit(CONTEXT_GUC_INIT, &ce->flags);
}
static int guc_request_alloc(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
int ret;
GEM_BUG_ON(!intel_context_is_pinned(rq->context));
/*
* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
rq->reserved_space += GUC_REQUEST_SIZE;
/*
* Note that after this point, we have committed to using
* this request as it is being used to both track the
* state of engine initialisation and liveness of the
* golden renderstate above. Think twice before you try
* to cancel/unwind this request now.
*/
/* Unconditionally invalidate GPU caches and TLBs. */
ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
if (ret)
return ret;
rq->reserved_space -= GUC_REQUEST_SIZE;
if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
guc_context_init(ce);
/*
* If the context gets closed while the execbuf is ongoing, the context
* close code will race with the below code to cancel the delayed work.
* If the context close wins the race and cancels the work, it will
* immediately call the sched disable (see guc_context_close), so there
* is a chance we can get past this check while the sched_disable code
* is being executed. To make sure that code completes before we check
* the status further down, we wait for the close process to complete.
* Otherwise, this code path could send a request down assuming that the
* context is still in schedule-enable mode, while the GuC ends up
* dropping the request completely because the disable issued from the
* context_close path reached the GuC just prior. In the event the CT is
* full, we could potentially need to wait up to 1.5 seconds.
*/
if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work))
intel_context_sched_disable_unpin(ce);
else if (intel_context_is_closed(ce))
if (wait_for(context_close_done(ce), 1500))
guc_warn(guc, "timed out waiting on context sched close before realloc\n");
/*
* Call pin_guc_id here rather than in the pinning step as with
* dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
* guc_id and creating horrible race conditions. This is especially bad
* when guc_ids are being stolen due to over subscription. By the time
* this function is reached, it is guaranteed that the guc_id will be
* persistent until the generated request is retired, thus sealing these
* race conditions. It is still safe to fail here if guc_ids are
* exhausted and return -EAGAIN to the user indicating that they can try
* again in the future.
*
* There is no need for a lock here as the timeline mutex ensures at
* most one context can be executing this code path at once. The
* guc_id_ref is incremented once for every request in flight and
* decremented on each retire. When it is zero, a lock around the
* increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
*/
if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
goto out;
ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
if (unlikely(ret < 0))
return ret;
if (context_needs_register(ce, !!ret)) {
ret = try_context_registration(ce, true);
if (unlikely(ret)) { /* unwind */
if (ret == -EPIPE) {
disable_submission(guc);
goto out; /* GPU will be reset */
}
atomic_dec(&ce->guc_id.ref);
unpin_guc_id(guc, ce);
return ret;
}
}
clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
out:
/*
* We block all requests on this context if a G2H is pending for a
* schedule disable or context deregistration, as the GuC will fail a
* schedule enable or context registration if either G2H is pending,
* respectively. Once a G2H returns, the fence blocking these requests
* is released (see guc_signal_context_fence).
*/
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
init_irq_work(&rq->submit_work, submit_work_cb);
i915_sw_fence_await(&rq->submit);
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return 0;
}
static int guc_virtual_context_pre_pin(struct intel_context *ce,
struct i915_gem_ww_ctx *ww,
void **vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
return __guc_context_pre_pin(ce, engine, ww, vaddr);
}
static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
int ret = __guc_context_pin(ce, engine, vaddr);
intel_engine_mask_t tmp, mask = ce->engine->mask;
if (likely(!ret))
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_get(engine);
return ret;
}
static void guc_virtual_context_unpin(struct intel_context *ce)
{
intel_engine_mask_t tmp, mask = ce->engine->mask;
struct intel_engine_cs *engine;
struct intel_guc *guc = ce_to_guc(ce);
GEM_BUG_ON(context_enabled(ce));
GEM_BUG_ON(intel_context_is_barrier(ce));
unpin_guc_id(guc, ce);
lrc_unpin(ce);
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_put_async(engine);
}
static void guc_virtual_context_enter(struct intel_context *ce)
{
intel_engine_mask_t tmp, mask = ce->engine->mask;
struct intel_engine_cs *engine;
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_get(engine);
intel_timeline_enter(ce->timeline);
}
static void guc_virtual_context_exit(struct intel_context *ce)
{
intel_engine_mask_t tmp, mask = ce->engine->mask;
struct intel_engine_cs *engine;
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
intel_engine_pm_put(engine);
intel_timeline_exit(ce->timeline);
}
static int guc_virtual_context_alloc(struct intel_context *ce)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
return lrc_alloc(ce, engine);
}
static const struct intel_context_ops virtual_guc_context_ops = {
.flags = COPS_RUNTIME_CYCLES,
.alloc = guc_virtual_context_alloc,
.close = guc_context_close,
.pre_pin = guc_virtual_context_pre_pin,
.pin = guc_virtual_context_pin,
.unpin = guc_virtual_context_unpin,
.post_unpin = guc_context_post_unpin,
.revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
.enter = guc_virtual_context_enter,
.exit = guc_virtual_context_exit,
.sched_disable = guc_context_sched_disable,
.update_stats = guc_context_update_stats,
.destroy = guc_context_destroy,
.get_sibling = guc_virtual_get_sibling,
};
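/*
 * Parent / child pinning for parallel (multi-LRC) submission: the parent
 * pins the guc_id covering the whole parent/child set, while each child only
 * pins its parent and its own LRC state.
 */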
static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
struct intel_guc *guc = ce_to_guc(ce);
int ret;
GEM_BUG_ON(!intel_context_is_parent(ce));
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
ret = pin_guc_id(guc, ce);
if (unlikely(ret < 0))
return ret;
return __guc_context_pin(ce, engine, vaddr);
}
static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
GEM_BUG_ON(!intel_context_is_child(ce));
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
__intel_context_pin(ce->parallel.parent);
return __guc_context_pin(ce, engine, vaddr);
}
static void guc_parent_context_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
GEM_BUG_ON(context_enabled(ce));
GEM_BUG_ON(intel_context_is_barrier(ce));
GEM_BUG_ON(!intel_context_is_parent(ce));
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
unpin_guc_id(guc, ce);
lrc_unpin(ce);
}
static void guc_child_context_unpin(struct intel_context *ce)
{
GEM_BUG_ON(context_enabled(ce));
GEM_BUG_ON(intel_context_is_barrier(ce));
GEM_BUG_ON(!intel_context_is_child(ce));
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
lrc_unpin(ce);
}
static void guc_child_context_post_unpin(struct intel_context *ce)
{
GEM_BUG_ON(!intel_context_is_child(ce));
GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
lrc_post_unpin(ce);
intel_context_unpin(ce->parallel.parent);
}
static void guc_child_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
__guc_context_destroy(ce);
}
static const struct intel_context_ops virtual_parent_context_ops = {
.alloc = guc_virtual_context_alloc,
.close = guc_context_close,
.pre_pin = guc_context_pre_pin,
.pin = guc_parent_context_pin,
.unpin = guc_parent_context_unpin,
.post_unpin = guc_context_post_unpin,
.revoke = guc_context_revoke,
.cancel_request = guc_context_cancel_request,
.enter = guc_virtual_context_enter,
.exit = guc_virtual_context_exit,
.sched_disable = guc_context_sched_disable,
.destroy = guc_context_destroy,
.get_sibling = guc_virtual_get_sibling,
};
static const struct intel_context_ops virtual_child_context_ops = {
.alloc = guc_virtual_context_alloc,
.pre_pin = guc_context_pre_pin,
.pin = guc_child_context_pin,
.unpin = guc_child_context_unpin,
.post_unpin = guc_child_context_post_unpin,
.cancel_request = guc_context_cancel_request,
.enter = guc_virtual_context_enter,
.exit = guc_virtual_context_exit,
.destroy = guc_child_context_destroy,
.get_sibling = guc_virtual_get_sibling,
};
/*
* The below override of the breadcrumbs is enabled when the user configures a
* context for parallel submission (multi-lrc, parent-child).
*
* The overridden breadcrumbs implement an algorithm which allows the GuC to
* safely preempt all the hw contexts configured for parallel submission
* between each BB. The contract between the i915 and the GuC is that if the
* parent context can be preempted, all the children can be preempted, and the
* GuC will always try to preempt the parent before the children. A handshake
* between the parent / children breadcrumbs ensures the i915 holds up its end
* of the deal, creating a window to preempt between each set of BBs.
*/
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags);
static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags);
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs);
static u32 *
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs);
static struct intel_context *
guc_create_parallel(struct intel_engine_cs **engines,
unsigned int num_siblings,
unsigned int width)
{
struct intel_engine_cs **siblings = NULL;
struct intel_context *parent = NULL, *ce, *err;
int i, j;
siblings = kmalloc_array(num_siblings,
sizeof(*siblings),
GFP_KERNEL);
if (!siblings)
return ERR_PTR(-ENOMEM);
for (i = 0; i < width; ++i) {
for (j = 0; j < num_siblings; ++j)
siblings[j] = engines[i * num_siblings + j];
ce = intel_engine_create_virtual(siblings, num_siblings,
FORCE_VIRTUAL);
if (IS_ERR(ce)) {
err = ERR_CAST(ce);
goto unwind;
}
if (i == 0) {
parent = ce;
parent->ops = &virtual_parent_context_ops;
} else {
ce->ops = &virtual_child_context_ops;
intel_context_bind_parent_child(parent, ce);
}
}
parent->parallel.fence_context = dma_fence_context_alloc(1);
parent->engine->emit_bb_start =
emit_bb_start_parent_no_preempt_mid_batch;
parent->engine->emit_fini_breadcrumb =
emit_fini_breadcrumb_parent_no_preempt_mid_batch;
parent->engine->emit_fini_breadcrumb_dw =
12 + 4 * parent->parallel.number_children;
for_each_child(parent, ce) {
ce->engine->emit_bb_start =
emit_bb_start_child_no_preempt_mid_batch;
ce->engine->emit_fini_breadcrumb =
emit_fini_breadcrumb_child_no_preempt_mid_batch;
ce->engine->emit_fini_breadcrumb_dw = 16;
}
kfree(siblings);
return parent;
unwind:
if (parent)
intel_context_put(parent);
kfree(siblings);
return err;
}
static bool
guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *sibling;
intel_engine_mask_t tmp, mask = b->engine_mask;
bool result = false;
for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
result |= intel_engine_irq_enable(sibling);
return result;
}
static void
guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *sibling;
intel_engine_mask_t tmp, mask = b->engine_mask;
for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
intel_engine_irq_disable(sibling);
}
static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
{
int i;
/*
* In GuC submission mode we do not know which physical engine a request
* will be scheduled on; this creates a problem because the breadcrumb
* interrupt is per physical engine. To work around this we attach
* requests and direct all breadcrumb interrupts to the first instance
* of an engine per class. In addition, all breadcrumb interrupts are
* enabled / disabled across an engine class in unison.
*/
for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
struct intel_engine_cs *sibling =
engine->gt->engine_class[engine->class][i];
if (sibling) {
if (engine->breadcrumbs != sibling->breadcrumbs) {
intel_breadcrumbs_put(engine->breadcrumbs);
engine->breadcrumbs =
intel_breadcrumbs_get(sibling->breadcrumbs);
}
break;
}
}
if (engine->breadcrumbs) {
engine->breadcrumbs->engine_mask |= engine->mask;
engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
}
}
static void guc_bump_inflight_request_prio(struct i915_request *rq,
int prio)
{
struct intel_context *ce = request_to_scheduling_context(rq);
u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
/* Short circuit function */
if (prio < I915_PRIORITY_NORMAL ||
rq->guc_prio == GUC_PRIO_FINI ||
(rq->guc_prio != GUC_PRIO_INIT &&
!new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
return;
spin_lock(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_FINI) {
if (rq->guc_prio != GUC_PRIO_INIT)
sub_context_inflight_prio(ce, rq->guc_prio);
rq->guc_prio = new_guc_prio;
add_context_inflight_prio(ce, rq->guc_prio);
update_context_prio(ce);
}
spin_unlock(&ce->guc_state.lock);
}
static void guc_retire_inflight_request_prio(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
spin_lock(&ce->guc_state.lock);
guc_prio_fini(rq, ce);
spin_unlock(&ce->guc_state.lock);
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
{
struct intel_timeline *tl;
list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
intel_timeline_reset_seqno(tl);
}
static void guc_sanitize(struct intel_engine_cs *engine)
{
/*
* Poison residual state on resume, in case the suspend didn't!
*
* We have to assume that across suspend/resume (or other loss
* of control) the contents of our pinned buffers have been
* lost, replaced by garbage. Since this doesn't always happen,
* let's poison such state so that we more quickly spot when
* we falsely assume it has been preserved.
*/
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
/*
* The kernel_context HWSP is stored in the status_page. As above,
* that may be lost on resume/initialisation, and so we need to
* reset the value in the HWSP.
*/
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
intel_engine_reset_pinned_contexts(engine);
}
static void setup_hwsp(struct intel_engine_cs *engine)
{
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
ENGINE_WRITE_FW(engine,
RING_HWS_PGA,
i915_ggtt_offset(engine->status_page.vma));
}
static void start_engine(struct intel_engine_cs *engine)
{
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
_MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
ENGINE_POSTING_READ(engine, RING_MI_MODE);
}
static int guc_resume(struct intel_engine_cs *engine)
{
assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
intel_mocs_init_engine(engine);
intel_breadcrumbs_reset(engine->breadcrumbs);
setup_hwsp(engine);
start_engine(engine);
if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
xehp_enable_ccs_engines(engine);
return 0;
}
static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
{
return !sched_engine->tasklet.callback;
}
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = guc_submit_request;
}
static inline int guc_kernel_context_pin(struct intel_guc *guc,
struct intel_context *ce)
{
int ret;
/*
* Note: we purposefully do not check the returns below because
* the registration can only fail if a reset is just starting.
* This is called at the end of reset so presumably another reset
* isn't happening and even if it did, this code would be run again.
*/
if (context_guc_id_invalid(ce)) {
ret = pin_guc_id(guc, ce);
if (ret < 0)
return ret;
}
if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
guc_context_init(ce);
ret = try_context_registration(ce, true);
if (ret)
unpin_guc_id(guc, ce);
return ret;
}
static inline int guc_init_submission(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* make sure all descriptors are clean... */
xa_destroy(&guc->context_lookup);
/*
* A reset might have occurred while we had a pending stalled request,
* so make sure we clean that up.
*/
guc->stalled_request = NULL;
guc->submission_stall_reason = STALL_NONE;
/*
* Some contexts might have been pinned before we enabled GuC
* submission, so we need to add them to the GuC bookkeeping.
* Also, after a reset of the GuC we want to make sure that the
* information shared with the GuC is properly reset. The kernel LRCs are
* not attached to the gem_context, so they need to be added separately.
*/
for_each_engine(engine, gt, id) {
struct intel_context *ce;
list_for_each_entry(ce, &engine->pinned_contexts_list,
pinned_contexts_link) {
int ret = guc_kernel_context_pin(guc, ce);
if (ret) {
/* No point in trying to clean up as i915 will wedge on failure */
return ret;
}
}
}
return 0;
}
static void guc_release(struct intel_engine_cs *engine)
{
engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
intel_engine_cleanup_common(engine);
lrc_fini_wa_ctx(engine);
}
static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
{
struct intel_engine_cs *e;
intel_engine_mask_t tmp, mask = engine->mask;
for_each_engine_masked(e, engine->gt, mask, tmp)
e->serial++;
}
static void guc_default_vfuncs(struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overridden by each engine. */
engine->resume = guc_resume;
engine->cops = &guc_context_ops;
engine->request_alloc = guc_request_alloc;
engine->add_active_request = add_to_context;
engine->remove_active_request = remove_from_context;
engine->sched_engine->schedule = i915_schedule;
engine->reset.prepare = guc_engine_reset_prepare;
engine->reset.rewind = guc_rewind_nop;
engine->reset.cancel = guc_reset_nop;
engine->reset.finish = guc_reset_nop;
engine->emit_flush = gen8_emit_flush_xcs;
engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
if (GRAPHICS_VER(engine->i915) >= 12) {
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
engine->emit_flush = gen12_emit_flush_xcs;
}
engine->set_default_submission = guc_set_default_submission;
engine->busyness = guc_engine_busyness;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
/* Wa_14014475959:dg2 */
if (engine->class == COMPUTE_CLASS)
if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
IS_DG2(engine->i915))
engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
* handled by the firmware so some minor tweaks are required before
* enabling.
*
* engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
*/
engine->emit_bb_start = gen8_emit_bb_start;
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
engine->emit_bb_start = xehp_emit_bb_start;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
{
switch (GRAPHICS_VER(engine->i915)) {
case 12:
engine->emit_flush = gen12_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
break;
case 11:
engine->emit_flush = gen11_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs;
break;
default:
engine->emit_flush = gen8_emit_flush_rcs;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
break;
}
}
static inline void guc_default_irqs(struct intel_engine_cs *engine)
{
engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT;
intel_engine_set_irq_handler(engine, cs_irq_handler);
}
static void guc_sched_engine_destroy(struct kref *kref)
{
struct i915_sched_engine *sched_engine =
container_of(kref, typeof(*sched_engine), ref);
struct intel_guc *guc = sched_engine->private_data;
guc->sched_engine = NULL;
tasklet_kill(&sched_engine->tasklet); /* flush the callback */
kfree(sched_engine);
}
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_guc *guc = &engine->gt->uc.guc;
/*
* The setup relies on several assumptions (e.g. irqs always enabled)
* that are only valid on gen11+
*/
GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
if (!guc->sched_engine) {
guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
if (!guc->sched_engine)
return -ENOMEM;
guc->sched_engine->schedule = i915_schedule;
guc->sched_engine->disabled = guc_sched_engine_disabled;
guc->sched_engine->private_data = guc;
guc->sched_engine->destroy = guc_sched_engine_destroy;
guc->sched_engine->bump_inflight_request_prio =
guc_bump_inflight_request_prio;
guc->sched_engine->retire_inflight_request_prio =
guc_retire_inflight_request_prio;
tasklet_setup(&guc->sched_engine->tasklet,
guc_submission_tasklet);
}
i915_sched_engine_put(engine->sched_engine);
engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
guc_default_vfuncs(engine);
guc_default_irqs(engine);
guc_init_breadcrumbs(engine);
if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
rcs_submission_override(engine);
lrc_init_wa_ctx(engine);
/* Finally, take ownership and responsibility for cleanup! */
engine->sanitize = guc_sanitize;
engine->release = guc_release;
return 0;
}
struct scheduling_policy {
/* internal data */
u32 max_words, num_words;
u32 count;
/* API data */
struct guc_update_scheduling_policy h2g;
};
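/*
* The h2g buffer is consumed as a plain stream of dwords: the fixed
* action header followed by policy->num_words KLV dwords. Each KLV is
* one header dword (key + length) plus 'len' data dwords, appended by
* __guc_scheduling_policy_add_klv() below.
*/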
static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy)
{
u32 *start = (void *)&policy->h2g;
u32 *end = policy->h2g.data + policy->num_words;
size_t delta = end - start;
return delta;
}
static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy)
{
policy->h2g.header.action = INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
policy->max_words = ARRAY_SIZE(policy->h2g.data);
policy->num_words = 0;
policy->count = 0;
return policy;
}
static void __guc_scheduling_policy_add_klv(struct scheduling_policy *policy,
u32 action, u32 *data, u32 len)
{
u32 *klv_ptr = policy->h2g.data + policy->num_words;
GEM_BUG_ON((policy->num_words + 1 + len) > policy->max_words);
*(klv_ptr++) = FIELD_PREP(GUC_KLV_0_KEY, action) |
FIELD_PREP(GUC_KLV_0_LEN, len);
memcpy(klv_ptr, data, sizeof(u32) * len);
policy->num_words += 1 + len;
policy->count++;
}
static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
struct scheduling_policy *policy)
{
int ret;
ret = intel_guc_send(guc, (u32 *)&policy->h2g,
__guc_scheduling_policy_action_size(policy));
if (ret < 0) {
guc_probe_error(guc, "Failed to configure global scheduling policies: %pe!\n",
ERR_PTR(ret));
return ret;
}
if (ret != policy->count) {
guc_warn(guc, "global scheduler policy processed %d of %d KLVs!",
ret, policy->count);
if (ret > policy->count)
return -EPROTO;
}
return 0;
}
static int guc_init_global_schedule_policy(struct intel_guc *guc)
{
struct scheduling_policy policy;
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
int ret;
if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
return 0;
__guc_scheduling_policy_start_klv(&policy);
with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
u32 yield[] = {
GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION,
GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO,
};
__guc_scheduling_policy_add_klv(&policy,
GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD,
yield, ARRAY_SIZE(yield));
ret = __guc_action_set_scheduling_policies(guc, &policy);
}
return ret;
}
static void guc_route_semaphores(struct intel_guc *guc, bool to_guc)
{
struct intel_gt *gt = guc_to_gt(guc);
u32 val;
if (GRAPHICS_VER(gt->i915) < 12)
return;
if (to_guc)
val = GUC_SEM_INTR_ROUTE_TO_GUC | GUC_SEM_INTR_ENABLE_ALL;
else
val = 0;
intel_uncore_write(gt->uncore, GEN12_GUC_SEM_INTR_ENABLES, val);
}
int intel_guc_submission_enable(struct intel_guc *guc)
{
int ret;
/* Semaphore interrupt enable and route to GuC */
guc_route_semaphores(guc, true);
ret = guc_init_submission(guc);
if (ret)
goto fail_sem;
ret = guc_init_engine_stats(guc);
if (ret)
goto fail_sem;
ret = guc_init_global_schedule_policy(guc);
if (ret)
goto fail_stats;
return 0;
fail_stats:
guc_fini_engine_stats(guc);
fail_sem:
guc_route_semaphores(guc, false);
return ret;
}
/* Note: By the time we're here, GuC may have already been reset */
void intel_guc_submission_disable(struct intel_guc *guc)
{
guc_cancel_busyness_worker(guc);
/* Semaphore interrupt disable and route to host */
guc_route_semaphores(guc, false);
}
static bool __guc_submission_supported(struct intel_guc *guc)
{
/* GuC submission is unavailable for pre-Gen11 */
return intel_guc_is_supported(guc) &&
GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
}
static bool __guc_submission_selected(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
if (!intel_guc_submission_is_supported(guc))
return false;
return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
}
int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc)
{
return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc);
}
/*
* This default value of 33 milliseconds (+1 millisecond rounded up) ensures 30fps or higher
* workloads are able to enjoy the latency reduction when delaying the schedule-disable
* operation. This matches the 30fps game-render + encode (real world) workload this
* knob was tested against.
*/
#define SCHED_DISABLE_DELAY_MS 34
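/*
* For reference: 1000 ms / 30 fps is ~33.3 ms per frame, hence 33 ms
* plus 1 ms of round up. A new frame submitted within that window is
* intended to keep the context scheduled and avoid a schedule-disable /
* re-enable round trip to the GuC.
*/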
/*
* A threshold of 75% is a reasonable starting point considering that real world apps
* generally don't get anywhere near this.
*/
#define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc) \
(((intel_guc_sched_disable_gucid_threshold_max(guc)) * 3) / 4)
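/*
* That is, 75% of intel_guc_sched_disable_gucid_threshold_max(), which
* in turn is num_guc_ids minus the ids set aside for multi-LRC
* submission (see above).
*/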
void intel_guc_submission_init_early(struct intel_guc *guc)
{
xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
spin_lock_init(&guc->submission_state.lock);
INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
ida_init(&guc->submission_state.guc_ids);
INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
INIT_WORK(&guc->submission_state.destroyed_worker,
destroyed_worker_func);
INIT_WORK(&guc->submission_state.reset_fail_worker,
reset_fail_worker_func);
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS;
guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
guc->submission_state.sched_disable_gucid_threshold =
NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc);
guc->submission_supported = __guc_submission_supported(guc);
guc->submission_selected = __guc_submission_selected(guc);
}
static inline struct intel_context *
g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
{
struct intel_context *ce;
if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
guc_err(guc, "Invalid ctx_id %u\n", ctx_id);
return NULL;
}
ce = __get_context(guc, ctx_id);
if (unlikely(!ce)) {
guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id);
return NULL;
}
if (unlikely(intel_context_is_child(ce))) {
guc_err(guc, "Context is child, ctx_id %u\n", ctx_id);
return NULL;
}
return ce;
}
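/*
* The context-based G2H handlers below use this lookup to validate the
* ctx_id carried in each message before acting on the context.
*/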
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
const u32 *msg,
u32 len)
{
struct intel_context *ce;
u32 ctx_id;
if (unlikely(len < 1)) {
guc_err(guc, "Invalid length %u\n", len);
return -EPROTO;
}
ctx_id = msg[0];
ce = g2h_context_lookup(guc, ctx_id);
if (unlikely(!ce))
return -EPROTO;
trace_intel_context_deregister_done(ce);
#ifdef CONFIG_DRM_I915_SELFTEST
if (unlikely(ce->drop_deregister)) {
ce->drop_deregister = false;
return 0;
}
#endif
if (context_wait_for_deregister_to_register(ce)) {
struct intel_runtime_pm *runtime_pm =
&ce->engine->gt->i915->runtime_pm;
intel_wakeref_t wakeref;
/*
* Previous owner of this guc_id has been deregistered, now it is
* safe to register this context.
*/
with_intel_runtime_pm(runtime_pm, wakeref)
register_context(ce, true);
guc_signal_context_fence(ce);
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
intel_gt_pm_put_async(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
decr_outstanding_submission_g2h(guc);
return 0;
}
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
const u32 *msg,
u32 len)
{
struct intel_context *ce;
unsigned long flags;
u32 ctx_id;
if (unlikely(len < 2)) {
guc_err(guc, "Invalid length %u\n", len);
return -EPROTO;
}
ctx_id = msg[0];
ce = g2h_context_lookup(guc, ctx_id);
if (unlikely(!ce))
return -EPROTO;
if (unlikely(context_destroyed(ce) ||
(!context_pending_enable(ce) &&
!context_pending_disable(ce)))) {
guc_err(guc, "Bad context sched_state 0x%x, ctx_id %u\n",
ce->guc_state.sched_state, ctx_id);
return -EPROTO;
}
trace_intel_context_sched_done(ce);
if (context_pending_enable(ce)) {
#ifdef CONFIG_DRM_I915_SELFTEST
if (unlikely(ce->drop_schedule_enable)) {
ce->drop_schedule_enable = false;
return 0;
}
#endif
spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_pending_enable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
} else if (context_pending_disable(ce)) {
bool banned;
#ifdef CONFIG_DRM_I915_SELFTEST
if (unlikely(ce->drop_schedule_disable)) {
ce->drop_schedule_disable = false;
return 0;
}
#endif
/*
* Unpin must be done before __guc_signal_context_fence,
* otherwise a race exists between the requests getting
* submitted + retired before this unpin completes resulting in
* the pin_count going to zero and the context still being
* enabled.
*/
intel_context_sched_disable_unpin(ce);
spin_lock_irqsave(&ce->guc_state.lock, flags);
banned = context_banned(ce);
clr_context_banned(ce);
clr_context_pending_disable(ce);
__guc_signal_context_fence(ce);
guc_blocked_fence_complete(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (banned) {
guc_cancel_context_requests(ce);
intel_engine_signal_breadcrumbs(ce->engine);
}
}
decr_outstanding_submission_g2h(guc);
intel_context_put(ce);
return 0;
}
static void capture_error_state(struct intel_guc *guc,
struct intel_context *ce)
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_private *i915 = gt->i915;
intel_wakeref_t wakeref;
intel_engine_mask_t engine_mask;
if (intel_engine_is_virtual(ce->engine)) {
struct intel_engine_cs *e;
intel_engine_mask_t tmp, virtual_mask = ce->engine->mask;
engine_mask = 0;
for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) {
bool match = intel_guc_capture_is_matching_engine(gt, ce, e);
if (match) {
intel_engine_set_hung_context(e, ce);
engine_mask |= e->mask;
atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
}
}
if (!engine_mask) {
guc_warn(guc, "No matching physical engine capture for virtual engine context 0x%04X / %s",
ce->guc_id.id, ce->engine->name);
engine_mask = ~0U;
}
} else {
intel_engine_set_hung_context(ce->engine, ce);
engine_mask = ce->engine->mask;
atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
}
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
}
static void guc_context_replay(struct intel_context *ce)
{
struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
__guc_reset_context(ce, ce->engine->mask);
tasklet_hi_schedule(&sched_engine->tasklet);
}
static void guc_handle_context_reset(struct intel_guc *guc,
struct intel_context *ce)
{
trace_intel_context_reset(ce);
guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
ce->guc_id.id, ce->engine->name,
str_yes_no(intel_context_is_exiting(ce)),
str_yes_no(intel_context_is_banned(ce)));
if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
} else {
guc_info(guc, "Ignoring context reset notification of exiting context 0x%04X on %s",
ce->guc_id.id, ce->engine->name);
}
}
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_context *ce;
unsigned long flags;
int ctx_id;
if (unlikely(len != 1)) {
guc_err(guc, "Invalid length %u", len);
return -EPROTO;
}
ctx_id = msg[0];
/*
* The context lookup uses the xarray but lookups only require an RCU lock,
* not the full spinlock. So take the lock explicitly and keep it until a
* reference to the context has been taken, ensuring it can't be destroyed
* asynchronously before the reset handling is done.
*/
xa_lock_irqsave(&guc->context_lookup, flags);
ce = g2h_context_lookup(guc, ctx_id);
if (ce)
intel_context_get(ce);
xa_unlock_irqrestore(&guc->context_lookup, flags);
if (unlikely(!ce))
return -EPROTO;
guc_handle_context_reset(guc, ce);
intel_context_put(ce);
return 0;
}
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
u32 status;
if (unlikely(len != 1)) {
guc_dbg(guc, "Invalid length %u", len);
return -EPROTO;
}
status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
guc_warn(guc, "No space for error capture");
intel_guc_capture_process(guc);
return 0;
}
struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
{
struct intel_gt *gt = guc_to_gt(guc);
u8 engine_class = guc_class_to_engine_class(guc_class);
/* Class index is checked in class converter */
GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
return gt->engine_class[engine_class][instance];
}
static void reset_fail_worker_func(struct work_struct *w)
{
struct intel_guc *guc = container_of(w, struct intel_guc,
submission_state.reset_fail_worker);
struct intel_gt *gt = guc_to_gt(guc);
intel_engine_mask_t reset_fail_mask;
unsigned long flags;
spin_lock_irqsave(&guc->submission_state.lock, flags);
reset_fail_mask = guc->submission_state.reset_fail_mask;
guc->submission_state.reset_fail_mask = 0;
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
if (likely(reset_fail_mask)) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
/*
* GuC is toast at this point - it dead loops after sending the failed
* reset notification. So we need to manually determine the guilty context.
* Note that it should be reliable to do this here because the GuC is
* toast and will not be scheduling behind the KMD's back.
*/
for_each_engine_masked(engine, gt, reset_fail_mask, id)
intel_guc_find_hung_context(engine);
intel_gt_handle_error(gt, reset_fail_mask,
I915_ERROR_CAPTURE,
"GuC failed to reset engine mask=0x%x",
reset_fail_mask);
}
}
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_engine_cs *engine;
u8 guc_class, instance;
u32 reason;
unsigned long flags;
if (unlikely(len != 3)) {
guc_err(guc, "Invalid length %u", len);
return -EPROTO;
}
guc_class = msg[0];
instance = msg[1];
reason = msg[2];
engine = intel_guc_lookup_engine(guc, guc_class, instance);
if (unlikely(!engine)) {
guc_err(guc, "Invalid engine %d:%d", guc_class, instance);
return -EPROTO;
}
/*
* This is an unexpected failure of a hardware feature. So, log a real
* error message, not just the informational one that comes with the reset.
*/
guc_err(guc, "Engine reset failed on %d:%d (%s) because 0x%08X",
guc_class, instance, engine->name, reason);
spin_lock_irqsave(&guc->submission_state.lock, flags);
guc->submission_state.reset_fail_mask |= engine->mask;
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
/*
* A GT reset flushes this worker queue (G2H handler) so we must use
* another worker to trigger a GT reset.
*/
queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
return 0;
}
void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{
struct intel_guc *guc = &engine->gt->uc.guc;
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
unsigned long flags;
/* Reset called during driver load? GuC not yet initialised! */
if (unlikely(!guc_submission_initialized(guc)))
return;
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
bool found;
if (!kref_get_unless_zero(&ce->ref))
continue;
xa_unlock(&guc->context_lookup);
if (!intel_context_is_pinned(ce))
goto next;
if (intel_engine_is_virtual(ce->engine)) {
if (!(ce->engine->mask & engine->mask))
goto next;
} else {
if (ce->engine != engine)
goto next;
}
found = false;
spin_lock(&ce->guc_state.lock);
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue;
found = true;
break;
}
spin_unlock(&ce->guc_state.lock);
if (found) {
intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */
intel_context_put(ce);
xa_lock(&guc->context_lookup);
goto done;
}
next:
intel_context_put(ce);
xa_lock(&guc->context_lookup);
}
done:
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
{
struct intel_guc *guc = &engine->gt->uc.guc;
struct intel_context *ce;
unsigned long index;
unsigned long flags;
/* Reset called during driver load? GuC not yet initialised! */
if (unlikely(!guc_submission_initialized(guc)))
return;
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
if (!kref_get_unless_zero(&ce->ref))
continue;
xa_unlock(&guc->context_lookup);
if (!intel_context_is_pinned(ce))
goto next;
if (intel_engine_is_virtual(ce->engine)) {
if (!(ce->engine->mask & engine->mask))
goto next;
} else {
if (ce->engine != engine)
goto next;
}
spin_lock(&ce->guc_state.lock);
intel_engine_dump_active_requests(&ce->guc_state.requests,
hung_rq, m);
spin_unlock(&ce->guc_state.lock);
next:
intel_context_put(ce);
xa_lock(&guc->context_lookup);
}
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
void intel_guc_submission_print_info(struct intel_guc *guc,
struct drm_printer *p)
{
struct i915_sched_engine *sched_engine = guc->sched_engine;
struct rb_node *rb;
unsigned long flags;
if (!sched_engine)
return;
drm_printf(p, "GuC Submission API Version: %d.%d.%d\n",
guc->submission_version.major, guc->submission_version.minor,
guc->submission_version.patch);
drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
atomic_read(&guc->outstanding_submission_g2h));
drm_printf(p, "GuC tasklet count: %u\n",
atomic_read(&sched_engine->tasklet.count));
spin_lock_irqsave(&sched_engine->lock, flags);
drm_printf(p, "Requests in GuC submit tasklet:\n");
for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *pl = to_priolist(rb);
struct i915_request *rq;
priolist_for_each_request(rq, pl)
drm_printf(p, "guc_id=%u, seqno=%llu\n",
rq->context->guc_id.id,
rq->fence.seqno);
}
spin_unlock_irqrestore(&sched_engine->lock, flags);
drm_printf(p, "\n");
}
static inline void guc_log_context_priority(struct drm_printer *p,
struct intel_context *ce)
{
int i;
drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
i < GUC_CLIENT_PRIORITY_NUM; ++i) {
drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
i, ce->guc_state.prio_count[i]);
}
drm_printf(p, "\n");
}
static inline void guc_log_context(struct drm_printer *p,
struct intel_context *ce)
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
ce->ring->head,
ce->lrc_reg_state[CTX_RING_HEAD]);
drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
ce->ring->tail,
ce->lrc_reg_state[CTX_RING_TAIL]);
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
atomic_read(&ce->guc_id.ref));
drm_printf(p, "\t\tSchedule State: 0x%x\n",
ce->guc_state.sched_state);
}
void intel_guc_submission_print_context_info(struct intel_guc *guc,
struct drm_printer *p)
{
struct intel_context *ce;
unsigned long index;
unsigned long flags;
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
GEM_BUG_ON(intel_context_is_child(ce));
guc_log_context(p, ce);
guc_log_context_priority(p, ce);
if (intel_context_is_parent(ce)) {
struct intel_context *child;
drm_printf(p, "\t\tNumber children: %u\n",
ce->parallel.number_children);
if (ce->parallel.guc.wq_status) {
drm_printf(p, "\t\tWQI Head: %u\n",
READ_ONCE(*ce->parallel.guc.wq_head));
drm_printf(p, "\t\tWQI Tail: %u\n",
READ_ONCE(*ce->parallel.guc.wq_tail));
drm_printf(p, "\t\tWQI Status: %u\n",
READ_ONCE(*ce->parallel.guc.wq_status));
}
if (ce->engine->emit_bb_start ==
emit_bb_start_parent_no_preempt_mid_batch) {
u8 i;
drm_printf(p, "\t\tChildren Go: %u\n",
get_children_go_value(ce));
for (i = 0; i < ce->parallel.number_children; ++i)
drm_printf(p, "\t\tChildren Join: %u\n",
get_children_join_value(ce, i));
}
for_each_child(ce, child)
guc_log_context(p, child);
}
}
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
static inline u32 get_children_go_addr(struct intel_context *ce)
{
GEM_BUG_ON(!intel_context_is_parent(ce));
return i915_ggtt_offset(ce->state) +
__get_parent_scratch_offset(ce) +
offsetof(struct parent_scratch, go.semaphore);
}
static inline u32 get_children_join_addr(struct intel_context *ce,
u8 child_index)
{
GEM_BUG_ON(!intel_context_is_parent(ce));
return i915_ggtt_offset(ce->state) +
__get_parent_scratch_offset(ce) +
offsetof(struct parent_scratch, join[child_index].semaphore);
}
#define PARENT_GO_BB 1
#define PARENT_GO_FINI_BREADCRUMB 0
#define CHILD_GO_BB 1
#define CHILD_GO_FINI_BREADCRUMB 0
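/*
* Handshake between the parent and child requests of a multi-LRC
* submission, built on GGTT semaphores in the parent's scratch page:
* each child signals its join semaphore and then polls the shared go
* semaphore; the parent waits for every join, turns off preemption and
* writes the go value to release the children into their batches. The
* fini breadcrumbs repeat the same pattern with the *_FINI_BREADCRUMB
* values, re-enabling preemption on the way out.
*/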
static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
struct intel_context *ce = rq->context;
u32 *cs;
u8 i;
GEM_BUG_ON(!intel_context_is_parent(ce));
cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Wait on children */
for (i = 0; i < ce->parallel.number_children; ++i) {
*cs++ = (MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD);
*cs++ = PARENT_GO_BB;
*cs++ = get_children_join_addr(ce, i);
*cs++ = 0;
}
/* Turn off preemption */
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_NOOP;
/* Tell children go */
cs = gen8_emit_ggtt_write(cs,
CHILD_GO_BB,
get_children_go_addr(ce),
0);
/* Jump to batch */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
return 0;
}
static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
struct intel_context *ce = rq->context;
struct intel_context *parent = intel_context_to_parent(ce);
u32 *cs;
GEM_BUG_ON(!intel_context_is_child(ce));
cs = intel_ring_begin(rq, 12);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Signal parent */
cs = gen8_emit_ggtt_write(cs,
PARENT_GO_BB,
get_children_join_addr(parent,
ce->parallel.child_index),
0);
/* Wait on parent for go */
*cs++ = (MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD);
*cs++ = CHILD_GO_BB;
*cs++ = get_children_go_addr(parent);
*cs++ = 0;
/* Turn off preemption */
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
/* Jump to batch */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
intel_ring_advance(rq, cs);
return 0;
}
static u32 *
__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
u8 i;
GEM_BUG_ON(!intel_context_is_parent(ce));
/* Wait on children */
for (i = 0; i < ce->parallel.number_children; ++i) {
*cs++ = (MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD);
*cs++ = PARENT_GO_FINI_BREADCRUMB;
*cs++ = get_children_join_addr(ce, i);
*cs++ = 0;
}
/* Turn on preemption */
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_NOOP;
/* Tell children go */
cs = gen8_emit_ggtt_write(cs,
CHILD_GO_FINI_BREADCRUMB,
get_children_go_addr(ce),
0);
return cs;
}
/*
* If this is true, a submission of multi-lrc requests had an error and the
* requests need to be skipped. The front end (execbuf IOCTL) should've called
* i915_request_skip which squashes the BB but we still need to emit the fini
* breadcrumb seqno write. At this point we don't know how many of the
* requests in the multi-lrc submission were generated so we can't do the
* handshake between the parent and children (e.g. if 4 requests should be
* generated but the 2nd hit an error only 1 would be seen by the GuC backend).
* Simply skip the handshake, but still emit the breadcrumb seqno, if an error
* has occurred on any of the requests in submission / relationship.
*/
static inline bool skip_handshake(struct i915_request *rq)
{
return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
}
#define NON_SKIP_LEN 6
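/*
* NON_SKIP_LEN counts the dwords emitted after the (possibly skipped)
* handshake: four for the gen8_emit_ggtt_write() of the seqno plus
* MI_USER_INTERRUPT and MI_NOOP, as verified by the GEM_BUG_ONs below.
*/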
static u32 *
emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
__maybe_unused u32 *start_fini_breadcrumb_cs = cs;
GEM_BUG_ON(!intel_context_is_parent(ce));
if (unlikely(skip_handshake(rq))) {
/*
* NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
* the NON_SKIP_LEN comes from the length of the emits below.
*/
memset(cs, 0, sizeof(u32) *
(ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
} else {
cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
}
/* Emit fini breadcrumb */
before_fini_breadcrumb_user_interrupt_cs = cs;
cs = gen8_emit_ggtt_write(cs,
rq->fence.seqno,
i915_request_active_timeline(rq)->hwsp_offset,
0);
/* User interrupt */
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
/* Ensure our math for skip + emit is correct */
GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
cs);
GEM_BUG_ON(start_fini_breadcrumb_cs +
ce->engine->emit_fini_breadcrumb_dw != cs);
rq->tail = intel_ring_offset(rq, cs);
return cs;
}
static u32 *
__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
struct intel_context *parent = intel_context_to_parent(ce);
GEM_BUG_ON(!intel_context_is_child(ce));
/* Turn on preemption */
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_NOOP;
/* Signal parent */
cs = gen8_emit_ggtt_write(cs,
PARENT_GO_FINI_BREADCRUMB,
get_children_join_addr(parent,
ce->parallel.child_index),
0);
/* Wait on parent for go */
*cs++ = (MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD);
*cs++ = CHILD_GO_FINI_BREADCRUMB;
*cs++ = get_children_go_addr(parent);
*cs++ = 0;
return cs;
}
static u32 *
emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
u32 *cs)
{
struct intel_context *ce = rq->context;
__maybe_unused u32 *before_fini_breadcrumb_user_interrupt_cs;
__maybe_unused u32 *start_fini_breadcrumb_cs = cs;
GEM_BUG_ON(!intel_context_is_child(ce));
if (unlikely(skip_handshake(rq))) {
/*
* NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
* the NON_SKIP_LEN comes from the length of the emits below.
*/
memset(cs, 0, sizeof(u32) *
(ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
} else {
cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
}
/* Emit fini breadcrumb */
before_fini_breadcrumb_user_interrupt_cs = cs;
cs = gen8_emit_ggtt_write(cs,
rq->fence.seqno,
i915_request_active_timeline(rq)->hwsp_offset,
0);
/* User interrupt */
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
/* Ensure our math for skip + emit is correct */
GEM_BUG_ON(before_fini_breadcrumb_user_interrupt_cs + NON_SKIP_LEN !=
cs);
GEM_BUG_ON(start_fini_breadcrumb_cs +
ce->engine->emit_fini_breadcrumb_dw != cs);
rq->tail = intel_ring_offset(rq, cs);
return cs;
}
#undef NON_SKIP_LEN
static struct intel_context *
guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
unsigned long flags)
{
struct guc_virtual_engine *ve;
struct intel_guc *guc;
unsigned int n;
int err;
ve = kzalloc(sizeof(*ve), GFP_KERNEL);
if (!ve)
return ERR_PTR(-ENOMEM);
guc = &siblings[0]->gt->uc.guc;
ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
ve->base.uncore = siblings[0]->uncore;
ve->base.id = -1;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
ve->base.saturated = ALL_ENGINES;
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
ve->base.cops = &virtual_guc_context_ops;
ve->base.request_alloc = guc_request_alloc;
ve->base.bump_serial = virtual_guc_bump_serial;
ve->base.submit_request = guc_submit_request;
ve->base.flags = I915_ENGINE_IS_VIRTUAL;
BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
ve->base.mask = VIRTUAL_ENGINES;
intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
struct intel_engine_cs *sibling = siblings[n];
GEM_BUG_ON(!is_power_of_2(sibling->mask));
if (sibling->mask & ve->base.mask) {
guc_dbg(guc, "duplicate %s entry in load balancer\n",
sibling->name);
err = -EINVAL;
goto err_put;
}
ve->base.mask |= sibling->mask;
ve->base.logical_mask |= sibling->logical_mask;
if (n != 0 && ve->base.class != sibling->class) {
guc_dbg(guc, "invalid mixing of engine class, sibling %d, already %d\n",
sibling->class, ve->base.class);
err = -EINVAL;
goto err_put;
} else if (n == 0) {
ve->base.class = sibling->class;
ve->base.uabi_class = sibling->uabi_class;
snprintf(ve->base.name, sizeof(ve->base.name),
"v%dx%d", ve->base.class, count);
ve->base.context_size = sibling->context_size;
ve->base.add_active_request =
sibling->add_active_request;
ve->base.remove_active_request =
sibling->remove_active_request;
ve->base.emit_bb_start = sibling->emit_bb_start;
ve->base.emit_flush = sibling->emit_flush;
ve->base.emit_init_breadcrumb =
sibling->emit_init_breadcrumb;
ve->base.emit_fini_breadcrumb =
sibling->emit_fini_breadcrumb;
ve->base.emit_fini_breadcrumb_dw =
sibling->emit_fini_breadcrumb_dw;
ve->base.breadcrumbs =
intel_breadcrumbs_get(sibling->breadcrumbs);
ve->base.flags |= sibling->flags;
ve->base.props.timeslice_duration_ms =
sibling->props.timeslice_duration_ms;
ve->base.props.preempt_timeout_ms =
sibling->props.preempt_timeout_ms;
}
}
return &ve->context;
err_put:
intel_context_put(&ve->context);
return ERR_PTR(err);
}
bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
{
struct intel_engine_cs *engine;
intel_engine_mask_t tmp, mask = ve->mask;
for_each_engine_masked(engine, ve->gt, mask, tmp)
if (READ_ONCE(engine->props.heartbeat_interval_ms))
return true;
return false;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#include "selftest_guc_multi_lrc.c"
#include "selftest_guc_hangcheck.c"
#endif
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2019 Intel Corporation
*/
#include <linux/bsearch.h>
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
#include "intel_guc_print.h"
#include "intel_uc.h"
#include "i915_drv.h"
/*
* The Additional Data Struct (ADS) has pointers for different buffers used by
* the GuC. A single gem object contains the ADS struct itself (guc_ads) and
* all the extra buffers indirectly linked via the ADS struct's entries.
*
* Layout of the ADS blob allocated for the GuC:
*
* +---------------------------------------+ <== base
* | guc_ads |
* +---------------------------------------+
* | guc_policies |
* +---------------------------------------+
* | guc_gt_system_info |
* +---------------------------------------+
* | guc_engine_usage |
* +---------------------------------------+ <== static
* | guc_mmio_reg[countA] (engine 0.0) |
* | guc_mmio_reg[countB] (engine 0.1) |
* | guc_mmio_reg[countC] (engine 1.0) |
* | ... |
* +---------------------------------------+ <== dynamic
* | padding |
* +---------------------------------------+ <== 4K aligned
* | golden contexts |
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
* | private data |
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
*/
struct __guc_ads_blob {
struct guc_ads ads;
struct guc_policies policies;
struct guc_gt_system_info system_info;
struct guc_engine_usage engine_usage;
/* From here on, location is dynamic! Refer to above diagram. */
struct guc_mmio_reg regset[];
} __packed;
#define ads_blob_read(guc_, field_) \
iosys_map_rd_field(&(guc_)->ads_map, 0, struct __guc_ads_blob, field_)
#define ads_blob_write(guc_, field_, val_) \
iosys_map_wr_field(&(guc_)->ads_map, 0, struct __guc_ads_blob, \
field_, val_)
#define info_map_write(map_, field_, val_) \
iosys_map_wr_field(map_, 0, struct guc_gt_system_info, field_, val_)
#define info_map_read(map_, field_) \
iosys_map_rd_field(map_, 0, struct guc_gt_system_info, field_)
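/*
* All accesses to the blob go through these iosys_map helpers so the
* same code works whether the ADS lives in system memory or in LMEM
* (I/O memory), e.g.:
*
* ads_blob_write(guc, policies.is_valid, 1);
* val = ads_blob_read(guc, ads.scheduler_policies);
*/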
static u32 guc_ads_regset_size(struct intel_guc *guc)
{
GEM_BUG_ON(!guc->ads_regset_size);
return guc->ads_regset_size;
}
static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->ads_capture_size);
}
static u32 guc_ads_private_data_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->fw.private_data_size);
}
static u32 guc_ads_regset_offset(struct intel_guc *guc)
{
return offsetof(struct __guc_ads_blob, regset);
}
static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
{
u32 offset;
offset = guc_ads_regset_offset(guc) +
guc_ads_regset_size(guc);
return PAGE_ALIGN(offset);
}
static u32 guc_ads_capture_offset(struct intel_guc *guc)
{
u32 offset;
offset = guc_ads_golden_ctxt_offset(guc) +
guc_ads_golden_ctxt_size(guc);
return PAGE_ALIGN(offset);
}
static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
u32 offset;
offset = guc_ads_capture_offset(guc) +
guc_ads_capture_size(guc);
return PAGE_ALIGN(offset);
}
static u32 guc_ads_blob_size(struct intel_guc *guc)
{
return guc_ads_private_data_offset(guc) +
guc_ads_private_data_size(guc);
}
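/*
* The offset helpers above walk the layout shown in the diagram at the
* top of the file: each dynamically sized section (regset, golden
* contexts, capture lists, private data) starts at the PAGE_ALIGNed end
* of the previous one, so the blob size is simply the private data
* offset plus the private data size.
*/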
static void guc_policies_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_private *i915 = gt->i915;
u32 global_flags = 0;
ads_blob_write(guc, policies.dpc_promote_time,
GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US);
ads_blob_write(guc, policies.max_num_work_items,
GLOBAL_POLICY_MAX_NUM_WI);
if (i915->params.reset < 2)
global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
ads_blob_write(guc, policies.global_flags, global_flags);
ads_blob_write(guc, policies.is_valid, 1);
}
void intel_guc_ads_print_policy_info(struct intel_guc *guc,
struct drm_printer *dp)
{
if (unlikely(iosys_map_is_null(&guc->ads_map)))
return;
drm_printf(dp, "Global scheduling policies:\n");
drm_printf(dp, " DPC promote time = %u\n",
ads_blob_read(guc, policies.dpc_promote_time));
drm_printf(dp, " Max num work items = %u\n",
ads_blob_read(guc, policies.max_num_work_items));
drm_printf(dp, " Flags = %u\n",
ads_blob_read(guc, policies.global_flags));
}
static int guc_action_policies_update(struct intel_guc *guc, u32 policy_offset)
{
u32 action[] = {
INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
policy_offset
};
return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
}
int intel_guc_global_policies_update(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
u32 scheduler_policies;
intel_wakeref_t wakeref;
int ret;
if (iosys_map_is_null(&guc->ads_map))
return -EOPNOTSUPP;
scheduler_policies = ads_blob_read(guc, ads.scheduler_policies);
GEM_BUG_ON(!scheduler_policies);
guc_policies_init(guc);
if (!intel_guc_is_ready(guc))
return 0;
with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
ret = guc_action_policies_update(guc, scheduler_policies);
return ret;
}
static void guc_mapping_table_init(struct intel_gt *gt,
struct iosys_map *info_map)
{
unsigned int i, j;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Table must be set to invalid values for entries not used */
for (i = 0; i < GUC_MAX_ENGINE_CLASSES; ++i)
for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; ++j)
info_map_write(info_map, mapping_table[i][j],
GUC_MAX_INSTANCES_PER_CLASS);
for_each_engine(engine, gt, id) {
u8 guc_class = engine_class_to_guc_class(engine->class);
info_map_write(info_map, mapping_table[guc_class][ilog2(engine->logical_mask)],
engine->instance);
}
}
/*
* The save/restore register list must be pre-calculated to a temporary
* buffer before it can be copied inside the ADS.
*/
struct temp_regset {
/*
* ptr to the section of the storage for the engine currently being
* worked on
*/
struct guc_mmio_reg *registers;
/* ptr to the base of the allocated storage for all engines */
struct guc_mmio_reg *storage;
u32 storage_used;
u32 storage_max;
};
static int guc_mmio_reg_cmp(const void *a, const void *b)
{
const struct guc_mmio_reg *ra = a;
const struct guc_mmio_reg *rb = b;
return (int)ra->offset - (int)rb->offset;
}
static struct guc_mmio_reg * __must_check
__mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
{
u32 pos = regset->storage_used;
struct guc_mmio_reg *slot;
if (pos >= regset->storage_max) {
size_t size = ALIGN((pos + 1) * sizeof(*slot), PAGE_SIZE);
struct guc_mmio_reg *r = krealloc(regset->storage,
size, GFP_KERNEL);
if (!r) {
WARN_ONCE(1, "Incomplete regset list: can't add register (%d)\n",
-ENOMEM);
return ERR_PTR(-ENOMEM);
}
regset->registers = r + (regset->registers - regset->storage);
regset->storage = r;
regset->storage_max = size / sizeof(*slot);
}
slot = &regset->storage[pos];
regset->storage_used++;
*slot = *reg;
return slot;
}
static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
struct temp_regset *regset,
u32 offset, u32 flags)
{
u32 count = regset->storage_used - (regset->registers - regset->storage);
struct guc_mmio_reg entry = {
.offset = offset,
.flags = flags,
};
struct guc_mmio_reg *slot;
/*
* The mmio list is built using separate lists within the driver.
* It's possible that at some point we may attempt to add the same
* register more than once. Do not consider this an error; silently
* move on if the register is already in the list.
*/
if (bsearch(&entry, regset->registers, count,
sizeof(entry), guc_mmio_reg_cmp))
return 0;
slot = __mmio_reg_add(regset, &entry);
if (IS_ERR(slot))
return PTR_ERR(slot);
while (slot-- > regset->registers) {
GEM_BUG_ON(slot[0].offset == slot[1].offset);
if (slot[1].offset > slot[0].offset)
break;
swap(slot[1], slot[0]);
}
return 0;
}
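/*
* The swap loop above keeps each engine's sub-list sorted by offset,
* which is what allows the bsearch() duplicate check on the next
* insertion.
*/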
#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \
guc_mmio_reg_add(gt, \
regset, \
i915_mmio_reg_offset(reg), \
(masked) ? GUC_REGSET_MASKED : 0)
#define GUC_REGSET_STEERING(group, instance) ( \
FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
GUC_REGSET_NEEDS_STEERING \
)
static long __must_check guc_mcr_reg_add(struct intel_gt *gt,
struct temp_regset *regset,
i915_mcr_reg_t reg, u32 flags)
{
u8 group, inst;
/*
* The GuC doesn't have a default steering, so we need to explicitly
* steer all registers that need steering. However, we do not keep track
* of all the steering ranges, only of those that have a chance of using
* a non-default steering from the i915 pov. Instead of adding such
* tracking, it is easier to just program the default steering for all
* regs that don't need a non-default one.
*/
intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
flags |= GUC_REGSET_STEERING(group, inst);
return guc_mmio_reg_add(gt, regset, i915_mmio_reg_offset(reg), flags);
}
#define GUC_MCR_REG_ADD(gt, regset, reg, masked) \
guc_mcr_reg_add(gt, \
regset, \
(reg), \
(masked) ? GUC_REGSET_MASKED : 0)
static int guc_mmio_regset_init(struct temp_regset *regset,
struct intel_engine_cs *engine)
{
struct intel_gt *gt = engine->gt;
const u32 base = engine->mmio_base;
struct i915_wa_list *wal = &engine->wa_list;
struct i915_wa *wa;
unsigned int i;
int ret = 0;
/*
* Each engine's register list starts at the current end of the shared
* storage
*/
regset->registers = regset->storage + regset->storage_used;
ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true);
ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false);
ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false);
if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) &&
CCS_MASK(engine->gt))
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
ret |= GUC_MMIO_REG_ADD(gt, regset,
RING_FORCE_TO_NONPRIV(base, i),
false);
/* add in local MOCS registers */
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
if (GRAPHICS_VER(engine->i915) >= 12) {
ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false);
ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false);
ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false);
ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false);
ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false);
ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false);
ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false);
}
return ret ? -1 : 0;
}
static long guc_mmio_reg_state_create(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct temp_regset temp_set = {};
long total = 0;
long ret;
for_each_engine(engine, gt, id) {
u32 used = temp_set.storage_used;
ret = guc_mmio_regset_init(&temp_set, engine);
if (ret < 0)
goto fail_regset_init;
guc->ads_regset_count[id] = temp_set.storage_used - used;
total += guc->ads_regset_count[id];
}
guc->ads_regset = temp_set.storage;
guc_dbg(guc, "Used %zu KB for temporary ADS regset\n",
(temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);
return total * sizeof(struct guc_mmio_reg);
fail_regset_init:
kfree(temp_set.storage);
return ret;
}
static void guc_mmio_reg_state_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
u32 addr_ggtt, offset;
offset = guc_ads_regset_offset(guc);
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
iosys_map_memcpy_to(&guc->ads_map, offset, guc->ads_regset,
guc->ads_regset_size);
for_each_engine(engine, gt, id) {
u32 count = guc->ads_regset_count[id];
u8 guc_class;
/* Class index is checked in class converter */
GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);
guc_class = engine_class_to_guc_class(engine->class);
if (!count) {
ads_blob_write(guc,
ads.reg_state_list[guc_class][engine->instance].address,
0);
ads_blob_write(guc,
ads.reg_state_list[guc_class][engine->instance].count,
0);
continue;
}
ads_blob_write(guc,
ads.reg_state_list[guc_class][engine->instance].address,
addr_ggtt);
ads_blob_write(guc,
ads.reg_state_list[guc_class][engine->instance].count,
count);
addr_ggtt += count * sizeof(struct guc_mmio_reg);
}
}
static void fill_engine_enable_masks(struct intel_gt *gt,
struct iosys_map *info_map)
{
info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], BCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS], VEBOX_MASK(gt));
/* The GSC engine is an instance (6) of OTHER_CLASS */
if (gt->engine[GSC0])
info_map_write(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
BIT(gt->engine[GSC0]->instance));
}
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
XEHP_LR_HW_CONTEXT_SIZE : \
LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
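/*
* LRC_SKIP_SIZE is the part of the context image that precedes the
* engine state proper: the per-process HWSP page(s) plus the execlists
* register context. Subtracting it from the full context size below
* yields the engine state size handed to the GuC.
*/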
static int guc_prep_golden_context(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
u32 addr_ggtt, offset;
u32 total_size = 0, alloc_size, real_size;
u8 engine_class, guc_class;
struct guc_gt_system_info local_info;
struct iosys_map info_map;
/*
* Reserve the memory for the golden contexts and point GuC at it but
* leave it empty for now. The context data will be filled in later
* once there is something available to put there.
*
* Note that the HWSP and ring context are not included.
*
* Note also that the storage must be pinned in the GGTT, so that the
* address won't change after GuC has been told where to find it. The
* GuC will also validate that the LRC base + size fall within the
* allowed GGTT range.
*/
if (!iosys_map_is_null(&guc->ads_map)) {
offset = guc_ads_golden_ctxt_offset(guc);
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
offsetof(struct __guc_ads_blob, system_info));
} else {
memset(&local_info, 0, sizeof(local_info));
iosys_map_set_vaddr(&info_map, &local_info);
fill_engine_enable_masks(gt, &info_map);
}
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
guc_class = engine_class_to_guc_class(engine_class);
if (!info_map_read(&info_map, engine_enabled_masks[guc_class]))
continue;
real_size = intel_engine_context_size(gt, engine_class);
alloc_size = PAGE_ALIGN(real_size);
total_size += alloc_size;
if (iosys_map_is_null(&guc->ads_map))
continue;
/*
* This interface is slightly confusing. We need to pass the
* base address of the full golden context and the size of just
* the engine state, which is the section of the context image
* that starts after the execlists context. This is required to
* allow the GuC to restore just the engine state when a
* watchdog reset occurs.
* We calculate the engine state size by removing the size of
* what comes before it in the context image (which is identical
* on all engines).
*/
ads_blob_write(guc, ads.eng_state_size[guc_class],
real_size - LRC_SKIP_SIZE(gt->i915));
ads_blob_write(guc, ads.golden_context_lrca[guc_class],
addr_ggtt);
addr_ggtt += alloc_size;
}
/* Make sure current size matches what we calculated previously */
if (guc->ads_golden_ctxt_size)
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
return total_size;
}
static struct intel_engine_cs *find_engine_state(struct intel_gt *gt, u8 engine_class)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
if (engine->class != engine_class)
continue;
if (!engine->default_state)
continue;
return engine;
}
return NULL;
}
static void guc_init_golden_context(struct intel_guc *guc)
{
struct intel_engine_cs *engine;
struct intel_gt *gt = guc_to_gt(guc);
unsigned long offset;
u32 addr_ggtt, total_size = 0, alloc_size, real_size;
u8 engine_class, guc_class;
if (!intel_uc_uses_guc_submission(&gt->uc))
return;
GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
/*
* Go back and fill in the golden context data now that it is
* available.
*/
offset = guc_ads_golden_ctxt_offset(guc);
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
guc_class = engine_class_to_guc_class(engine_class);
if (!ads_blob_read(guc, system_info.engine_enabled_masks[guc_class]))
continue;
real_size = intel_engine_context_size(gt, engine_class);
alloc_size = PAGE_ALIGN(real_size);
total_size += alloc_size;
engine = find_engine_state(gt, engine_class);
if (!engine) {
guc_err(guc, "No engine state recorded for class %d!\n",
engine_class);
ads_blob_write(guc, ads.eng_state_size[guc_class], 0);
ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0);
continue;
}
GEM_BUG_ON(ads_blob_read(guc, ads.eng_state_size[guc_class]) !=
real_size - LRC_SKIP_SIZE(gt->i915));
GEM_BUG_ON(ads_blob_read(guc, ads.golden_context_lrca[guc_class]) != addr_ggtt);
addr_ggtt += alloc_size;
shmem_read_to_iosys_map(engine->default_state, 0, &guc->ads_map,
offset, real_size);
offset += alloc_size;
}
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}
static u32 guc_get_capture_engine_mask(struct iosys_map *info_map, u32 capture_class)
{
u32 mask;
switch (capture_class) {
case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
mask = info_map_read(info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
mask |= info_map_read(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
break;
case GUC_CAPTURE_LIST_CLASS_VIDEO:
mask = info_map_read(info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
break;
case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
mask = info_map_read(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
break;
case GUC_CAPTURE_LIST_CLASS_BLITTER:
mask = info_map_read(info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
break;
case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
mask = info_map_read(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
break;
default:
mask = 0;
}
return mask;
}
static int
guc_capture_prep_lists(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
struct guc_gt_system_info local_info;
struct iosys_map info_map;
bool ads_is_mapped;
size_t size = 0;
void *ptr;
int i, j;
ads_is_mapped = !iosys_map_is_null(&guc->ads_map);
if (ads_is_mapped) {
capture_offset = guc_ads_capture_offset(guc);
ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma);
info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
offsetof(struct __guc_ads_blob, system_info));
} else {
memset(&local_info, 0, sizeof(local_info));
iosys_map_set_vaddr(&info_map, &local_info);
fill_engine_enable_masks(gt, &info_map);
}
/* first, set aside the first page for a capture_list with zero descriptors */
total_size = PAGE_SIZE;
if (ads_is_mapped) {
if (!intel_guc_capture_getnullheader(guc, &ptr, &size))
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
null_ggtt = ads_ggtt + capture_offset;
capture_offset += PAGE_SIZE;
}
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
u32 engine_mask = guc_get_capture_engine_mask(&info_map, j);
/* null list if we don't have said engine or list */
if (!engine_mask) {
if (ads_is_mapped) {
ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
}
continue;
}
if (intel_guc_capture_getlistsize(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
j, &size)) {
if (ads_is_mapped)
ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
goto engine_instance_list;
}
total_size += size;
if (ads_is_mapped) {
if (total_size > guc->ads_capture_size ||
intel_guc_capture_getlist(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
j, &ptr)) {
ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
continue;
}
ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt +
capture_offset);
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
capture_offset += size;
}
engine_instance_list:
if (intel_guc_capture_getlistsize(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
j, &size)) {
if (ads_is_mapped)
ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
continue;
}
total_size += size;
if (ads_is_mapped) {
if (total_size > guc->ads_capture_size ||
intel_guc_capture_getlist(guc, i,
GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
j, &ptr)) {
ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
continue;
}
ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt +
capture_offset);
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
capture_offset += size;
}
}
if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) {
if (ads_is_mapped)
ads_blob_write(guc, ads.capture_global[i], null_ggtt);
continue;
}
total_size += size;
if (ads_is_mapped) {
if (total_size > guc->ads_capture_size ||
intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0,
&ptr)) {
ads_blob_write(guc, ads.capture_global[i], null_ggtt);
continue;
}
ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset);
iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
capture_offset += size;
}
}
if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
guc_warn(guc, "ADS capture alloc size changed from %d to %d\n",
guc->ads_capture_size, PAGE_ALIGN(total_size));
return PAGE_ALIGN(total_size);
}
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_private *i915 = gt->i915;
struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
offsetof(struct __guc_ads_blob, system_info));
u32 base;
/* GuC scheduling policies */
guc_policies_init(guc);
/* System info */
fill_engine_enable_masks(gt, &info_map);
ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED],
hweight8(gt->info.sseu.slice_mask));
ads_blob_write(guc, system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK],
gt->info.vdbox_sfc_access);
if (GRAPHICS_VER(i915) >= 12 && !IS_DGFX(i915)) {
u32 distdbreg = intel_uncore_read(gt->uncore,
GEN12_DIST_DBS_POPULATED);
ads_blob_write(guc,
system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
((distdbreg >> GEN12_DOORBELLS_PER_SQIDI_SHIFT)
& GEN12_DOORBELLS_PER_SQIDI) + 1);
}
/* Golden contexts for re-initialising after a watchdog reset */
guc_prep_golden_context(guc);
guc_mapping_table_init(guc_to_gt(guc), &info_map);
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
/* Lists for error capture debug */
guc_capture_prep_lists(guc);
/* ADS */
ads_blob_write(guc, ads.scheduler_policies, base +
offsetof(struct __guc_ads_blob, policies));
ads_blob_write(guc, ads.gt_system_info, base +
offsetof(struct __guc_ads_blob, system_info));
/* MMIO save/restore list */
guc_mmio_reg_state_init(guc);
/* Private Data */
ads_blob_write(guc, ads.private_data, base +
guc_ads_private_data_offset(guc));
i915_gem_object_flush_map(guc->ads_vma->obj);
}
/**
* intel_guc_ads_create() - allocates and initializes GuC ADS.
* @guc: intel_guc struct
*
* GuC needs a memory block (Additional Data Struct), where it will store
* some data. Allocate and initialize such a block for GuC use.
*/
int intel_guc_ads_create(struct intel_guc *guc)
{
void *ads_blob;
u32 size;
int ret;
GEM_BUG_ON(guc->ads_vma);
/*
* Create the reg state buffer dynamically in system memory, to be
* copied to the final ads blob on gt init/reset
*/
ret = guc_mmio_reg_state_create(guc);
if (ret < 0)
return ret;
guc->ads_regset_size = ret;
/* Likewise the golden contexts: */
ret = guc_prep_golden_context(guc);
if (ret < 0)
return ret;
guc->ads_golden_ctxt_size = ret;
/* Likewise the capture lists: */
ret = guc_capture_prep_lists(guc);
if (ret < 0)
return ret;
guc->ads_capture_size = ret;
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
&ads_blob);
if (ret)
return ret;
if (i915_gem_object_is_lmem(guc->ads_vma->obj))
iosys_map_set_vaddr_iomem(&guc->ads_map, (void __iomem *)ads_blob);
else
iosys_map_set_vaddr(&guc->ads_map, ads_blob);
__guc_ads_init(guc);
return 0;
}
void intel_guc_ads_init_late(struct intel_guc *guc)
{
/*
* The golden context setup requires the saved engine state from
* __engines_record_defaults(). However, that requires engines to be
* operational which means the ADS must already have been configured.
* Fortunately, the golden context state is not needed until a hang
* occurs, so it can be filled in during this late init phase.
*/
guc_init_golden_context(guc);
}
void intel_guc_ads_destroy(struct intel_guc *guc)
{
i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
iosys_map_clear(&guc->ads_map);
kfree(guc->ads_regset);
}
static void guc_ads_private_data_reset(struct intel_guc *guc)
{
u32 size;
size = guc_ads_private_data_size(guc);
if (!size)
return;
iosys_map_memset(&guc->ads_map, guc_ads_private_data_offset(guc),
0, size);
}
/**
* intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
* @guc: intel_guc struct
*
* GuC stores some data in ADS, which might be stale after a reset.
 * Reinitialize the whole ADS in case any part of it was corrupted during
 * the previous GuC run.
*/
void intel_guc_ads_reset(struct intel_guc *guc)
{
if (!guc->ads_vma)
return;
__guc_ads_init(guc);
guc_ads_private_data_reset(guc);
}
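/*
 * Illustrative ADS lifecycle (a sketch for orientation only; the real call
 * sites live elsewhere in the uc/gt code and the ordering below is an
 * assumption drawn from the functions in this file, not a verbatim trace):
 *
 *	intel_guc_ads_create(guc);	// driver load: allocate blob, first init
 *	intel_guc_ads_init_late(guc);	// after engine defaults are recorded
 *	...
 *	intel_guc_ads_reset(guc);	// around a GuC reset/reload
 *	...
 *	intel_guc_ads_destroy(guc);	// teardown
 */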
u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
{
return intel_guc_ggtt_offset(guc, guc->ads_vma) +
offsetof(struct __guc_ads_blob, engine_usage);
}
struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
struct intel_guc *guc = &engine->gt->uc.guc;
u8 guc_class = engine_class_to_guc_class(engine->class);
size_t offset = offsetof(struct __guc_ads_blob,
engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);
return IOSYS_MAP_INIT_OFFSET(&guc->ads_map, offset);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <drm/drm_print.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_debugfs.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_uc_debugfs.h"
#include "i915_drv.h"
static int gsc_info_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
struct intel_gsc_uc *gsc = m->private;
if (!intel_gsc_uc_is_supported(gsc))
return -ENODEV;
intel_gsc_uc_load_status(gsc, &p);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(gsc_info);
void intel_gsc_uc_debugfs_register(struct intel_gsc_uc *gsc_uc, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "gsc_info", &gsc_info_fops, NULL },
};
if (!intel_gsc_uc_is_supported(gsc_uc))
return;
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gsc_uc);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2019 Intel Corporation
*/
#include <linux/debugfs.h>
#include <linux/string_helpers.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "intel_guc_capture.h"
#include "intel_guc_log.h"
#include "intel_guc_print.h"
#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
#else
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
#endif
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
struct guc_log_section {
u32 max;
u32 flag;
u32 default_val;
const char *name;
};
static void _guc_log_init_sizes(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
{
GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
GUC_LOG_LOG_ALLOC_UNITS,
GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE,
"crash dump"
},
{
GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT,
GUC_LOG_LOG_ALLOC_UNITS,
GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE,
"debug",
},
{
GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT,
GUC_LOG_CAPTURE_ALLOC_UNITS,
GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE,
"capture",
}
};
int i;
for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
log->sizes[i].bytes = sections[i].default_val;
/* If debug size > 1MB then bump default crash size to keep the same units */
if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M)
log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M;
/* Prepare the GuC API structure fields: */
for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) {
/* Convert to correct units */
if ((log->sizes[i].bytes % SZ_1M) == 0) {
log->sizes[i].units = SZ_1M;
log->sizes[i].flag = sections[i].flag;
} else {
log->sizes[i].units = SZ_4K;
log->sizes[i].flag = 0;
}
if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
guc_err(guc, "Mis-aligned log %s size: 0x%X vs 0x%X!\n",
sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;
if (!log->sizes[i].count) {
guc_err(guc, "Zero log %s size!\n", sections[i].name);
} else {
/* Size is +1 unit */
log->sizes[i].count--;
}
/* Clip to field size */
if (log->sizes[i].count > sections[i].max) {
guc_err(guc, "log %s size too large: %d vs %d!\n",
sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
log->sizes[i].count = sections[i].max;
}
}
if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
guc_err(guc, "Unit mismatch for crash and debug sections: %d vs %d!\n",
log->sizes[GUC_LOG_SECTIONS_CRASH].units,
log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0;
}
log->sizes_initialised = true;
}
static void guc_log_init_sizes(struct intel_guc_log *log)
{
if (log->sizes_initialised)
return;
_guc_log_init_sizes(log);
}
static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log)
{
guc_log_init_sizes(log);
return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes;
}
static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log)
{
guc_log_init_sizes(log);
return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes;
}
u32 intel_guc_log_section_size_capture(struct intel_guc_log *log)
{
guc_log_init_sizes(log);
return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes;
}
static u32 intel_guc_log_size(struct intel_guc_log *log)
{
/*
* GuC Log buffer Layout:
*
* NB: Ordering must follow "enum guc_log_buffer_type".
*
 * +===============================+ 00B
 * |      Debug state header       |
 * +-------------------------------+ 32B
 * |    Crash dump state header    |
 * +-------------------------------+ 64B
 * |     Capture state header      |
 * +-------------------------------+ 96B
 * |                               |
 * +===============================+ PAGE_SIZE (4KB)
 * |          Debug logs           |
 * +===============================+ + DEBUG_SIZE
 * |        Crash Dump logs        |
 * +===============================+ + CRASH_SIZE
 * |         Capture logs          |
 * +===============================+ + CAPTURE_SIZE
*/
return PAGE_SIZE +
intel_guc_log_section_size_crash(log) +
intel_guc_log_section_size_debug(log) +
intel_guc_log_section_size_capture(log);
}
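/*
 * Worked size example (illustrative only, assuming the Kconfig defaults
 * above are not overridden and no clamping in _guc_log_init_sizes() kicks
 * in): with CONFIG_DRM_I915_DEBUG_GUC the total is
 * 4K (headers) + 2M (crash) + 16M (debug) + 1M (capture) = 19M + 4K,
 * while with neither debug option selected it is
 * 4K (headers) + 8K (crash) + 64K (debug) + 1M (capture) = 1M + 76K.
 */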
/**
* DOC: GuC firmware log
*
 * The firmware log is enabled by setting i915.guc_log_level to a positive
 * level. Log data is printed out by reading the debugfs file
 * i915_guc_log_dump. Reading from i915_guc_load_status will print out the
 * firmware loading status and scratch register values.
*/
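/*
 * Example (illustrative): booting with a positive i915.guc_log_level
 * (e.g. i915.guc_log_level=2) enables the firmware log; the exact mapping
 * from the parameter value to GuC verbosity is an implementation detail of
 * the GUC_LOG_LEVEL_* helpers used in __get_default_log_level() and
 * intel_guc_log_set_level() below.
 */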
static int guc_action_flush_log_complete(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
GUC_DEBUG_LOG_BUFFER
};
return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}
static int guc_action_flush_log(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
0
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
static int guc_action_control_log(struct intel_guc *guc, bool enable,
bool default_logging, u32 verbosity)
{
u32 action[] = {
INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
};
GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
size_t prev_padding)
{
/*
* Use no-overwrite mode by default, where relay will stop accepting
* new data if there are no empty sub buffers left.
* There is no strict synchronization enforced by relay between Consumer
* and Producer. In overwrite mode, there is a possibility of getting
* inconsistent/garbled data, the producer could be writing on to the
* same sub buffer from which Consumer is reading. This can't be avoided
* unless Consumer is fast enough and can always run in tandem with
* Producer.
*/
if (relay_buf_full(buf))
return 0;
return 1;
}
/*
* file_create() callback. Creates relay file in debugfs.
*/
static struct dentry *create_buf_file_callback(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
struct dentry *buf_file;
/*
 * This is to enable the use of a single buffer for the relay channel and
* correspondingly have a single file exposed to User, through which
* it can collect the logs in order without any post-processing.
* Need to set 'is_global' even if parent is NULL for early logging.
*/
*is_global = 1;
if (!parent)
return NULL;
buf_file = debugfs_create_file(filename, mode,
parent, buf, &relay_file_operations);
if (IS_ERR(buf_file))
return NULL;
return buf_file;
}
/*
* file_remove() default callback. Removes relay file in debugfs.
*/
static int remove_buf_file_callback(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
/* relay channel callbacks */
static const struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_callback,
.create_buf_file = create_buf_file_callback,
.remove_buf_file = remove_buf_file_callback,
};
static void guc_move_to_next_buf(struct intel_guc_log *log)
{
/*
* Make sure the updates made in the sub buffer are visible when
* Consumer sees the following update to offset inside the sub buffer.
*/
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
relay_reserve(log->relay.channel, log->vma->obj->base.size -
intel_guc_log_section_size_capture(log));
/* Switch to the next sub buffer */
relay_flush(log->relay.channel);
}
static void *guc_get_write_buffer(struct intel_guc_log *log)
{
/*
* Just get the base address of a new sub buffer and copy data into it
* ourselves. NULL will be returned in no-overwrite mode, if all sub
* buffers are full. Could have used the relay_write() to indirectly
 * copy the data, but that would have been a bit convoluted, as we need to
 * write to only certain locations inside a sub buffer, which cannot be
 * done without using relay_reserve() along with relay_write(). So it's
 * better to use relay_reserve() alone.
*/
return relay_reserve(log->relay.channel, 0);
}
bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
enum guc_log_buffer_type type,
unsigned int full_cnt)
{
unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
bool overflow = false;
if (full_cnt != prev_full_cnt) {
overflow = true;
log->stats[type].overflow = full_cnt;
log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;
if (full_cnt < prev_full_cnt) {
/* buffer_full_cnt is a 4 bit counter */
log->stats[type].sampled_overflow += 16;
}
guc_notice_ratelimited(log_to_guc(log), "log buffer overflow\n");
}
return overflow;
}
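/*
 * Worked example of the 4-bit wrap handling above (illustrative values):
 * with a previously sampled count of 14 and a new buffer_full_cnt of 2 the
 * hardware counter has wrapped, so the effective number of new overflows is
 * (2 - 14) + 16 = 4, which is what sampled_overflow is advanced by (the
 * unsigned subtraction plus 16 yields the same result modulo 2^32).
 */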
unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log,
enum guc_log_buffer_type type)
{
switch (type) {
case GUC_DEBUG_LOG_BUFFER:
return intel_guc_log_section_size_debug(log);
case GUC_CRASH_DUMP_LOG_BUFFER:
return intel_guc_log_section_size_crash(log);
case GUC_CAPTURE_LOG_BUFFER:
return intel_guc_log_section_size_capture(log);
default:
MISSING_CASE(type);
}
return 0;
}
size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
enum guc_log_buffer_type type)
{
enum guc_log_buffer_type i;
size_t offset = PAGE_SIZE;/* for the log_buffer_states */
for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
if (i == type)
break;
offset += intel_guc_get_log_buffer_size(log, i);
}
return offset;
}
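/*
 * Example of the resulting layout (follows the enum guc_log_buffer_type
 * ordering; the sizes are whatever intel_guc_get_log_buffer_size() returns
 * for the preceding sections):
 *
 *	offset(DEBUG)   = PAGE_SIZE
 *	offset(CRASH)   = PAGE_SIZE + debug_size
 *	offset(CAPTURE) = PAGE_SIZE + debug_size + crash_size
 */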
static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
struct guc_log_buffer_state log_buf_state_local;
enum guc_log_buffer_type type;
void *src_data, *dst_data;
bool new_overflow;
mutex_lock(&log->relay.lock);
if (guc_WARN_ON(guc, !intel_guc_log_relay_created(log)))
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
src_data = log->buf_addr;
log_buf_state = src_data;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
if (unlikely(!log_buf_snapshot_state)) {
/*
 * Use a rate-limited message to avoid a deluge of messages; logs might be
 * getting consumed by the User at a slow rate.
*/
guc_err_ratelimited(guc, "no sub-buffer to copy general logs\n");
log->relay.full_count++;
goto out_unlock;
}
/* Actual logs are present from the 2nd page */
src_data += PAGE_SIZE;
dst_data += PAGE_SIZE;
/* For relay logging, we exclude error state capture */
for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
/*
* Make a copy of the state structure, inside GuC log buffer
* (which is uncached mapped), on the stack to avoid reading
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
buffer_size = intel_guc_get_log_buffer_size(log, type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
log->stats[type].flush += log_buf_state_local.flush_to_file;
new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);
/* Update the state of shared log buffer */
log_buf_state->read_ptr = write_offset;
log_buf_state->flush_to_file = 0;
log_buf_state++;
/* First copy the state structure in snapshot buffer */
memcpy(log_buf_snapshot_state, &log_buf_state_local,
sizeof(struct guc_log_buffer_state));
/*
 * The write pointer could have been updated by the GuC firmware
 * after sending the flush interrupt to the Host; for consistency,
 * set the write pointer in the snapshot buffer to the same value
 * as sampled_write_ptr.
*/
log_buf_snapshot_state->write_ptr = write_offset;
log_buf_snapshot_state++;
/* Now copy the actual logs. */
if (unlikely(new_overflow)) {
/* copy the whole buffer in case of overflow */
read_offset = 0;
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
guc_err(guc, "invalid log buffer state\n");
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
}
/* Just copy the newly written data */
if (read_offset > write_offset) {
i915_memcpy_from_wc(dst_data, src_data, write_offset);
bytes_to_copy = buffer_size - read_offset;
} else {
bytes_to_copy = write_offset - read_offset;
}
i915_memcpy_from_wc(dst_data + read_offset,
src_data + read_offset, bytes_to_copy);
src_data += buffer_size;
dst_data += buffer_size;
}
guc_move_to_next_buf(log);
out_unlock:
mutex_unlock(&log->relay.lock);
}
static void copy_debug_logs_work(struct work_struct *work)
{
struct intel_guc_log *log =
container_of(work, struct intel_guc_log, relay.flush_work);
guc_log_copy_debuglogs_for_relay(log);
}
static int guc_log_relay_map(struct intel_guc_log *log)
{
lockdep_assert_held(&log->relay.lock);
if (!log->vma || !log->buf_addr)
return -ENODEV;
/*
* WC vmalloc mapping of log buffer pages was done at
 * GuC Log Init time, but let's keep a ref for book-keeping
*/
i915_gem_object_get(log->vma->obj);
log->relay.buf_in_use = true;
return 0;
}
static void guc_log_relay_unmap(struct intel_guc_log *log)
{
lockdep_assert_held(&log->relay.lock);
i915_gem_object_put(log->vma->obj);
log->relay.buf_in_use = false;
}
void intel_guc_log_init_early(struct intel_guc_log *log)
{
mutex_init(&log->relay.lock);
INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
log->relay.started = false;
}
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
lockdep_assert_held(&log->relay.lock);
GEM_BUG_ON(!log->vma);
/*
 * Keep the size of the sub buffers the same as the shared log buffer,
 * but exclude the error-state-capture logs from the GuC log events.
*/
subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log);
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
* boot time logs and provides enough leeway to User, in terms of
* latency, for consuming the logs from relay. Also doesn't take
* up too much memory.
*/
n_subbufs = 8;
if (!guc->dbgfs_node)
return -ENOENT;
guc_log_relay_chan = relay_open("guc_log",
guc->dbgfs_node,
subbuf_size, n_subbufs,
&relay_callbacks, i915);
if (!guc_log_relay_chan) {
guc_err(guc, "Couldn't create relay channel for logging\n");
ret = -ENOMEM;
return ret;
}
GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
log->relay.channel = guc_log_relay_chan;
return 0;
}
static void guc_log_relay_destroy(struct intel_guc_log *log)
{
lockdep_assert_held(&log->relay.lock);
relay_close(log->relay.channel);
log->relay.channel = NULL;
}
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
/*
 * Generally the device is expected to be active only at this
* time, so get/put should be really quick.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
guc_action_flush_log_complete(guc);
}
static u32 __get_default_log_level(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
/* A negative value means "use platform/config default" */
if (i915->params.guc_log_level < 0) {
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
}
if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
guc_warn(guc, "Log verbosity param out of range: %d > %d!\n",
i915->params.guc_log_level, GUC_LOG_LEVEL_MAX);
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
}
GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
return i915->params.guc_log_level;
}
int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct i915_vma *vma;
void *vaddr;
u32 guc_log_size;
int ret;
GEM_BUG_ON(log->vma);
guc_log_size = intel_guc_log_size(log);
vma = intel_guc_allocate_vma(guc, guc_log_size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
log->vma = vma;
/*
 * Create a WC (Uncached for read) vmalloc mapping up front for immediate
 * access to data from memory during critical events such as error capture.
*/
vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
i915_vma_unpin_and_release(&log->vma, 0);
goto err;
}
log->buf_addr = vaddr;
log->level = __get_default_log_level(log);
guc_dbg(guc, "guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
log->level, str_enabled_disabled(log->level),
str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
return 0;
err:
guc_err(guc, "Failed to allocate or map log buffer %pe\n", ERR_PTR(ret));
return ret;
}
void intel_guc_log_destroy(struct intel_guc_log *log)
{
log->buf_addr = NULL;
i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
}
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
GEM_BUG_ON(!log->vma);
/*
 * GuC recognizes log levels from 0 to max; we're using 0 as an
 * indication that logging should be disabled.
*/
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
mutex_lock(&i915->drm.struct_mutex);
if (log->level == level)
goto out_unlock;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
ret = guc_action_control_log(guc,
GUC_LOG_LEVEL_IS_VERBOSE(level),
GUC_LOG_LEVEL_IS_ENABLED(level),
GUC_LOG_LEVEL_TO_VERBOSITY(level));
if (ret) {
guc_dbg(guc, "guc_log_control action failed %pe\n", ERR_PTR(ret));
goto out_unlock;
}
log->level = level;
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
return log->buf_addr;
}
int intel_guc_log_relay_open(struct intel_guc_log *log)
{
int ret;
if (!log->vma)
return -ENODEV;
mutex_lock(&log->relay.lock);
if (intel_guc_log_relay_created(log)) {
ret = -EEXIST;
goto out_unlock;
}
/*
* We require SSE 4.1 for fast reads from the GuC log buffer and
* it should be present on the chipsets supporting GuC based
* submissions.
*/
if (!i915_has_memcpy_from_wc()) {
ret = -ENXIO;
goto out_unlock;
}
ret = guc_log_relay_create(log);
if (ret)
goto out_unlock;
ret = guc_log_relay_map(log);
if (ret)
goto out_relay;
mutex_unlock(&log->relay.lock);
return 0;
out_relay:
guc_log_relay_destroy(log);
out_unlock:
mutex_unlock(&log->relay.lock);
return ret;
}
int intel_guc_log_relay_start(struct intel_guc_log *log)
{
if (log->relay.started)
return -EEXIST;
/*
* When GuC is logging without us relaying to userspace, we're ignoring
* the flush notification. This means that we need to unconditionally
* flush on relay enabling, since GuC only notifies us once.
*/
queue_work(system_highpri_wq, &log->relay.flush_work);
log->relay.started = true;
return 0;
}
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
intel_wakeref_t wakeref;
if (!log->relay.started)
return;
/*
* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
flush_work(&log->relay.flush_work);
with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so copy it */
guc_log_copy_debuglogs_for_relay(log);
}
/*
* Stops the relay log. Called from intel_guc_log_relay_close(), so no
* possibility of race with start/flush since relay_write cannot race
* relay_close.
*/
static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
if (!log->relay.started)
return;
intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
log->relay.started = false;
}
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
guc_log_relay_stop(log);
mutex_lock(&log->relay.lock);
GEM_BUG_ON(!intel_guc_log_relay_created(log));
guc_log_relay_unmap(log);
guc_log_relay_destroy(log);
mutex_unlock(&log->relay.lock);
}
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
if (log->relay.started)
queue_work(system_highpri_wq, &log->relay.flush_work);
}
static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_DEBUG_LOG_BUFFER:
return "DEBUG";
case GUC_CRASH_DUMP_LOG_BUFFER:
return "CRASH";
case GUC_CAPTURE_LOG_BUFFER:
return "CAPTURE";
default:
MISSING_CASE(type);
}
return "";
}
/**
* intel_guc_log_info - dump information about GuC log relay
* @log: the GuC log
* @p: the &drm_printer
*
* Pretty printer for GuC log info
*/
void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
{
enum guc_log_buffer_type type;
if (!intel_guc_log_relay_created(log)) {
drm_puts(p, "GuC log relay not created\n");
return;
}
drm_puts(p, "GuC logging stats:\n");
drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);
for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
stringify_guc_log_type(type),
log->stats[type].flush,
log->stats[type].sampled_overflow);
}
}
/**
* intel_guc_log_dump - dump the contents of the GuC log
* @log: the GuC log
* @p: the &drm_printer
* @dump_load_err: dump the log saved on GuC load error
*
* Pretty printer for the GuC log
*/
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
bool dump_load_err)
{
struct intel_guc *guc = log_to_guc(log);
struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
struct drm_i915_gem_object *obj = NULL;
void *map;
u32 *page;
int i, j;
if (!intel_guc_is_supported(guc))
return -ENODEV;
if (dump_load_err)
obj = uc->load_err_log;
else if (guc->log.vma)
obj = guc->log.vma->obj;
if (!obj)
return 0;
page = (u32 *)__get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
intel_guc_dump_time_info(guc, p);
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
guc_dbg(guc, "Failed to pin log object: %pe\n", map);
drm_puts(p, "(log data unaccessible)\n");
free_page((unsigned long)page);
return PTR_ERR(map);
}
for (i = 0; i < obj->base.size; i += PAGE_SIZE) {
if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE))
memcpy(page, map + i, PAGE_SIZE);
for (j = 0; j < PAGE_SIZE / sizeof(u32); j += 4)
drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
*(page + j + 0), *(page + j + 1),
*(page + j + 2), *(page + j + 3));
}
drm_puts(p, "\n");
i915_gem_object_unpin_map(obj);
free_page((unsigned long)page);
return 0;
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_log.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include <linux/types.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "i915_drv.h"
#include "i915_reg.h"
static void gsc_work(struct work_struct *work)
{
struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
struct intel_gt *gt = gsc_uc_to_gt(gsc);
intel_wakeref_t wakeref;
u32 actions;
int ret;
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
spin_lock_irq(gt->irq_lock);
actions = gsc->gsc_work_actions;
gsc->gsc_work_actions = 0;
spin_unlock_irq(gt->irq_lock);
if (actions & GSC_ACTION_FW_LOAD) {
ret = intel_gsc_uc_fw_upload(gsc);
if (!ret)
/* setup proxy on a new load */
actions |= GSC_ACTION_SW_PROXY;
else if (ret != -EEXIST)
goto out_put;
/*
 * The HuC auth can be done either before or after the proxy init;
* if done after, a proxy request will be issued and must be
* serviced before the authentication can complete.
* Since this worker also handles proxy requests, we can't
* perform an action that requires the proxy from within it and
* then stall waiting for it, because we'd be blocking the
* service path. Therefore, it is easier for us to load HuC
* first and do proxy later. The GSC will ack the HuC auth and
* then send the HuC proxy request as part of the proxy init
* flow.
* Note that we can only do the GSC auth if the GuC auth was
* successful.
*/
if (intel_uc_uses_huc(&gt->uc) &&
intel_huc_is_authenticated(&gt->uc.huc, INTEL_HUC_AUTH_BY_GUC))
intel_huc_auth(&gt->uc.huc, INTEL_HUC_AUTH_BY_GSC);
}
if (actions & GSC_ACTION_SW_PROXY) {
if (!intel_gsc_uc_fw_init_done(gsc)) {
gt_err(gt, "Proxy request received with GSC not loaded!\n");
goto out_put;
}
ret = intel_gsc_proxy_request_handler(gsc);
if (ret) {
if (actions & GSC_ACTION_FW_LOAD) {
/*
* A proxy failure right after firmware load means the proxy-init
* step has failed so mark GSC as not usable after this
*/
drm_err(&gt->i915->drm,
"GSC proxy handler failed to init\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
}
goto out_put;
}
/* mark the GSC FW init as done the first time we run this */
if (actions & GSC_ACTION_FW_LOAD) {
/*
* If there is a proxy establishment error, the GSC might still
* complete the request handling cleanly, so we need to check the
 * status register to see if the proxy init was actually successful
*/
if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) {
drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
} else {
drm_err(&gt->i915->drm,
"GSC status reports proxy init not complete\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
}
}
}
out_put:
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
static bool gsc_engine_supported(struct intel_gt *gt)
{
intel_engine_mask_t mask;
/*
* We reach here from i915_driver_early_probe for the primary GT before
* its engine mask is set, so we use the device info engine mask for it.
* For other GTs we expect the GT-specific mask to be set before we
* call this function.
*/
GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
if (gt_is_root(gt))
mask = INTEL_INFO(gt->i915)->platform_engine_mask;
else
mask = gt->info.engine_mask;
return __HAS_ENGINE(mask, GSC0);
}
void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
/*
 * The GSC FW needs to be copied to a dedicated memory allocation for
 * loading (see gsc->local), so we don't need to map the FW image
 * itself into the GGTT.
*/
intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC, false);
INIT_WORK(&gsc->work, gsc_work);
/*
 * We can arrive here from i915_driver_early_probe for the primary
 * GT before it is fully set up, hence the check of the device info's
 * engine mask.
 */
if (!gsc_engine_supported(gt)) {
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
return;
}
gsc->wq = alloc_ordered_workqueue("i915_gsc", 0);
if (!gsc->wq) {
gt_err(gt, "failed to allocate WQ for GSC, disabling FW\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}
}
static int gsc_allocate_and_map_vma(struct intel_gsc_uc *gsc, u32 size)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
void __iomem *vaddr;
int ret = 0;
/*
* The GSC FW doesn't immediately suspend after becoming idle, so there
* is a chance that it could still be awake after we successfully
* return from the pci suspend function, even if there are no pending
* operations.
* The FW might therefore try to access memory for its suspend operation
* after the kernel has completed the HW suspend flow; this can cause
* issues if the FW is mapped in normal RAM memory, as some of the
* involved HW units might've already lost power.
* The driver must therefore avoid this situation and the recommended
* way to do so is to use stolen memory for the GSC memory allocation,
* because stolen memory takes a different path in HW and it is
* guaranteed to always work as long as the GPU itself is awake (which
* it must be if the GSC is awake).
*/
obj = i915_gem_object_create_stolen(gt->i915, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
}
vaddr = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto err;
}
i915_vma_make_unshrinkable(vma);
gsc->local = vma;
gsc->local_vaddr = vaddr;
return 0;
err:
i915_gem_object_put(obj);
return ret;
}
static void gsc_unmap_and_free_vma(struct intel_gsc_uc *gsc)
{
struct i915_vma *vma = fetch_and_zero(&gsc->local);
if (!vma)
return;
gsc->local_vaddr = NULL;
i915_vma_unpin_iomap(vma);
i915_gem_object_put(vma->obj);
}
int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
{
static struct lock_class_key gsc_lock;
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct intel_engine_cs *engine = gt->engine[GSC0];
struct intel_context *ce;
int err;
err = intel_uc_fw_init(&gsc->fw);
if (err)
goto out;
err = gsc_allocate_and_map_vma(gsc, SZ_4M);
if (err)
goto out_fw;
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
I915_GEM_HWS_GSC_ADDR,
&gsc_lock, "gsc_context");
if (IS_ERR(ce)) {
gt_err(gt, "failed to create GSC CS ctx for FW communication\n");
err = PTR_ERR(ce);
goto out_vma;
}
gsc->ce = ce;
/* if we fail to init proxy we still want to load GSC for PM */
intel_gsc_proxy_init(gsc);
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
out_vma:
gsc_unmap_and_free_vma(gsc);
out_fw:
intel_uc_fw_fini(&gsc->fw);
out:
gt_probe_error(gt, "GSC init failed %pe\n", ERR_PTR(err));
return err;
}
void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
flush_work(&gsc->work);
if (gsc->wq) {
destroy_workqueue(gsc->wq);
gsc->wq = NULL;
}
intel_gsc_proxy_fini(gsc);
if (gsc->ce)
intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
gsc_unmap_and_free_vma(gsc);
intel_uc_fw_fini(&gsc->fw);
}
void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
flush_work(&gsc->work);
}
void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
{
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
/*
* we only want to start the GSC worker from here in the actual resume
* flow and not during driver load. This is because GSC load is slow and
* therefore we want to make sure that the default state init completes
* first to not slow down the init thread. A separate call to
* intel_gsc_uc_load_start will ensure that the GSC is loaded during
* driver load.
*/
if (!gsc_uc_to_gt(gsc)->engine[GSC0]->default_state)
return;
intel_gsc_uc_load_start(gsc);
}
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
if (!intel_uc_fw_is_loadable(&gsc->fw))
return;
if (intel_gsc_uc_fw_init_done(gsc))
return;
spin_lock_irq(gt->irq_lock);
gsc->gsc_work_actions |= GSC_ACTION_FW_LOAD;
spin_unlock_irq(gt->irq_lock);
queue_work(gsc->wq, &gsc->work);
}
void intel_gsc_uc_load_status(struct intel_gsc_uc *gsc, struct drm_printer *p)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
if (!intel_gsc_uc_is_supported(gsc)) {
drm_printf(p, "GSC not supported\n");
return;
}
if (!intel_gsc_uc_is_wanted(gsc)) {
drm_printf(p, "GSC disabled\n");
return;
}
drm_printf(p, "GSC firmware: %s\n", gsc->fw.file_selected.path);
if (gsc->fw.file_selected.path != gsc->fw.file_wanted.path)
drm_printf(p, "GSC firmware wanted: %s\n", gsc->fw.file_wanted.path);
drm_printf(p, "\tstatus: %s\n", intel_uc_fw_status_repr(gsc->fw.status));
drm_printf(p, "Release: %u.%u.%u.%u\n",
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build);
drm_printf(p, "Compatibility Version: %u.%u [min expected %u.%u]\n",
gsc->fw.file_selected.ver.major, gsc->fw.file_selected.ver.minor,
gsc->fw.file_wanted.ver.major, gsc->fw.file_wanted.ver.minor);
drm_printf(p, "SVN: %u\n", gsc->security_version);
with_intel_runtime_pm(uncore->rpm, wakeref) {
u32 i;
for (i = 1; i <= 6; i++) {
u32 status = intel_uncore_read(uncore,
HECI_FWSTS(MTL_GSC_HECI1_BASE, i));
drm_printf(p, "HECI1 FWSTST%u = 0x%08x\n", i, status);
}
}
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
#include "intel_huc.h"
#include "intel_huc_debugfs.h"
static int huc_info_show(struct seq_file *m, void *data)
{
struct intel_huc *huc = m->private;
struct drm_printer p = drm_seq_file_printer(m);
if (!intel_huc_is_supported(huc))
return -ENODEV;
intel_huc_load_status(huc, &p);
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(huc_info);
void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "huc_info", &huc_info_fops, NULL },
};
if (!intel_huc_is_supported(huc))
return;
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), huc);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "gt/intel_gt.h"
#include "gt/intel_hwconfig.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
/*
* GuC has a blob containing hardware configuration information (HWConfig).
* This is formatted as a simple and flexible KLV (Key/Length/Value) table.
*
* For example, a minimal version could be:
* enum device_attr {
* ATTR_SOME_VALUE = 0,
* ATTR_SOME_MASK = 1,
* };
*
* static const u32 hwconfig[] = {
* ATTR_SOME_VALUE,
* 1, // Value Length in DWords
* 8, // Value
*
* ATTR_SOME_MASK,
* 3,
* 0x00FFFFFFFF, 0xFFFFFFFF, 0xFF000000,
* };
*
* The attribute ids are defined in a hardware spec.
*/
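/*
 * A minimal sketch of consuming such a table (illustrative only, not used by
 * the driver; assumes "ptr"/"size" describe a table laid out as above, with
 * size expressed in u32 dwords):
 *
 *	static void walk_hwconfig(const u32 *ptr, u32 size)
 *	{
 *		u32 i = 0;
 *
 *		while (i + 2 <= size) {
 *			u32 key = ptr[i];
 *			u32 len = ptr[i + 1];
 *
 *			if (i + 2 + len > size)
 *				break;	// malformed entry
 *			// consume key and the len dwords at &ptr[i + 2] here
 *			i += 2 + len;
 *		}
 *	}
 */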
static int __guc_action_get_hwconfig(struct intel_guc *guc,
u32 ggtt_offset, u32 ggtt_size)
{
u32 action[] = {
INTEL_GUC_ACTION_GET_HWCONFIG,
lower_32_bits(ggtt_offset),
upper_32_bits(ggtt_offset),
ggtt_size,
};
int ret;
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret == -ENXIO)
return -ENOENT;
return ret;
}
static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
{
int ret;
/*
* Sending a query with zero offset and size will return the
* size of the blob.
*/
ret = __guc_action_get_hwconfig(guc, 0, 0);
if (ret < 0)
return ret;
if (ret == 0)
return -EINVAL;
hwconfig->size = ret;
return 0;
}
static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
{
struct i915_vma *vma;
u32 ggtt_offset;
void *vaddr;
int ret;
GEM_BUG_ON(!hwconfig->size);
ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr);
if (ret)
return ret;
ggtt_offset = intel_guc_ggtt_offset(guc, vma);
ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size);
if (ret >= 0)
memcpy(hwconfig->ptr, vaddr, hwconfig->size);
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
return ret;
}
static bool has_table(struct drm_i915_private *i915)
{
if (IS_ALDERLAKE_P(i915) && !IS_ALDERLAKE_P_N(i915))
return true;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return true;
return false;
}
/*
* intel_guc_hwconfig_init - Initialize the HWConfig
*
* Retrieve the HWConfig table from the GuC and save it locally.
* It can then be queried on demand by other users later on.
*/
static int guc_hwconfig_init(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
struct intel_guc *guc = &gt->uc.guc;
int ret;
if (!has_table(gt->i915))
return 0;
ret = guc_hwconfig_discover_size(guc, hwconfig);
if (ret)
return ret;
hwconfig->ptr = kmalloc(hwconfig->size, GFP_KERNEL);
if (!hwconfig->ptr) {
hwconfig->size = 0;
return -ENOMEM;
}
ret = guc_hwconfig_fill_buffer(guc, hwconfig);
if (ret < 0) {
intel_gt_fini_hwconfig(gt);
return ret;
}
return 0;
}
/*
* intel_gt_init_hwconfig - Initialize the HWConfig if available
*
* Retrieve the HWConfig table if available on the current platform.
*/
int intel_gt_init_hwconfig(struct intel_gt *gt)
{
if (!intel_uc_uses_guc(&gt->uc))
return 0;
return guc_hwconfig_init(gt);
}
/*
* intel_gt_fini_hwconfig - Finalize the HWConfig
*
* Free up the memory allocation holding the table.
*/
void intel_gt_fini_hwconfig(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
kfree(hwconfig->ptr);
hwconfig->size = 0;
hwconfig->ptr = NULL;
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2019 Intel Corporation
*/
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
/**
* DOC: GuC
*
* The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
* designed to offload some of the functionality usually performed by the host
* driver; currently the main operations it can take care of are:
*
* - Authentication of the HuC, which is required to fully enable HuC usage.
* - Low latency graphics context scheduling (a.k.a. GuC submission).
* - GT Power management.
*
* The enable_guc module parameter can be used to select which of those
* operations to enable within GuC. Note that not all the operations are
* supported on all gen9+ platforms.
*
* Enabling the GuC is not mandatory and therefore the firmware is only loaded
* if at least one of the operations is selected. However, not loading the GuC
* might result in the loss of some features that do require the GuC (currently
* just the HuC, but more are expected to land in the future).
*/
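/*
 * For example, the enable_guc modparam is a bitmask selecting the operations
 * above; a commonly used value is enable_guc=2, which (on platforms that
 * support it) loads the GuC only to authenticate the HuC while keeping
 * execlists submission. The authoritative bit definitions live elsewhere in
 * the driver (i915 params / intel_uc), so treat the value here as
 * illustrative.
 */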
void intel_guc_notify(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
/*
 * On Gen11+, the value written to the register is passed as a payload
* to the FW. However, the FW currently treats all values the same way
* (H2G interrupt), so we can just write the value that the HW expects
* on older gens.
*/
intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
GEM_BUG_ON(!guc->send_regs.base);
GEM_BUG_ON(!guc->send_regs.count);
GEM_BUG_ON(i >= guc->send_regs.count);
return _MMIO(guc->send_regs.base + 4 * i);
}
void intel_guc_init_send_regs(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
enum forcewake_domains fw_domains = 0;
unsigned int i;
GEM_BUG_ON(!guc->send_regs.base);
GEM_BUG_ON(!guc->send_regs.count);
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
guc->send_regs.fw_domains = fw_domains;
}
static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
spin_lock_irq(gt->irq_lock);
gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
spin_unlock_irq(gt->irq_lock);
}
static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
spin_lock_irq(gt->irq_lock);
guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
gt->pm_guc_events);
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
spin_unlock_irq(gt->irq_lock);
guc->interrupts.enabled = true;
}
static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
guc->interrupts.enabled = false;
spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
gen9_reset_guc_interrupts(guc);
}
static bool __gen11_reset_guc_interrupts(struct intel_gt *gt)
{
u32 irq = gt->type == GT_MEDIA ? MTL_MGUC : GEN11_GUC;
lockdep_assert_held(gt->irq_lock);
return gen11_gt_reset_one_iir(gt, 0, irq);
}
static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
spin_lock_irq(gt->irq_lock);
__gen11_reset_guc_interrupts(gt);
spin_unlock_irq(gt->irq_lock);
}
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
spin_lock_irq(gt->irq_lock);
__gen11_reset_guc_interrupts(gt);
spin_unlock_irq(gt->irq_lock);
guc->interrupts.enabled = true;
}
static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
guc->interrupts.enabled = false;
intel_synchronize_irq(gt->i915);
gen11_reset_guc_interrupts(guc);
}
void intel_guc_init_early(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_private *i915 = gt->i915;
intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, true);
intel_guc_ct_init_early(&guc->ct);
intel_guc_log_init_early(&guc->log);
intel_guc_submission_init_early(guc);
intel_guc_slpc_init_early(&guc->slpc);
intel_guc_rc_init_early(guc);
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
if (GRAPHICS_VER(i915) >= 11) {
guc->interrupts.reset = gen11_reset_guc_interrupts;
guc->interrupts.enable = gen11_enable_guc_interrupts;
guc->interrupts.disable = gen11_disable_guc_interrupts;
if (gt->type == GT_MEDIA) {
guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
} else {
guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
}
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
} else {
guc->notify_reg = GUC_SEND_INTERRUPT;
guc->interrupts.reset = gen9_reset_guc_interrupts;
guc->interrupts.enable = gen9_enable_guc_interrupts;
guc->interrupts.disable = gen9_disable_guc_interrupts;
guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
}
intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
}
void intel_guc_init_late(struct intel_guc *guc)
{
intel_guc_ads_init_late(guc);
}
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
u32 flags = 0;
if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
flags |= GUC_LOG_DISABLED;
else
flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
GUC_LOG_VERBOSITY_SHIFT;
return flags;
}
static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
if (!intel_guc_submission_is_used(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
if (intel_guc_slpc_is_used(guc))
flags |= GUC_CTL_ENABLE_SLPC;
return flags;
}
static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
struct intel_guc_log *log = &guc->log;
u32 offset, flags;
GEM_BUG_ON(!log->sizes_initialised);
offset = intel_guc_ggtt_offset(guc, log->vma) >> PAGE_SHIFT;
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
log->sizes[GUC_LOG_SECTIONS_DEBUG].flag |
log->sizes[GUC_LOG_SECTIONS_CAPTURE].flag |
(log->sizes[GUC_LOG_SECTIONS_CRASH].count << GUC_LOG_CRASH_SHIFT) |
(log->sizes[GUC_LOG_SECTIONS_DEBUG].count << GUC_LOG_DEBUG_SHIFT) |
(log->sizes[GUC_LOG_SECTIONS_CAPTURE].count << GUC_LOG_CAPTURE_SHIFT) |
(offset << GUC_LOG_BUF_ADDR_SHIFT);
return flags;
}
static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
u32 flags = ads << GUC_ADS_ADDR_SHIFT;
return flags;
}
static u32 guc_ctl_wa_flags(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
u32 flags = 0;
/* Wa_22012773006:gen11,gen12 < XeHP */
if (GRAPHICS_VER(gt->i915) >= 11 &&
GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
flags |= GUC_WA_POLLCS;
/* Wa_16011759253:dg2_g10:a0 */
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
flags |= GUC_WA_GAM_CREDITS;
/* Wa_14014475959 */
if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
/*
* Wa_14012197797:dg2_g10:a0,dg2_g11:a0
* Wa_22011391025:dg2_g10,dg2_g11,dg2_g12
*
* The same WA bit is used for both and 22011391025 is applicable to
* all DG2.
*/
if (IS_DG2(gt->i915))
flags |= GUC_WA_DUAL_QUEUE;
/* Wa_22011802037: graphics version 11/12 */
if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
(GRAPHICS_VER(gt->i915) >= 11 &&
GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70)))
flags |= GUC_WA_PRE_PARSER;
/* Wa_16011777198:dg2 */
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
/*
* Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..)
* Wa_22012727685:dg2_g11[a0..)
*/
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
flags |= GUC_WA_CONTEXT_ISOLATION;
/* Wa_16015675438 */
if (!RCS_MASK(gt))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
return flags;
}
static u32 guc_ctl_devid(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}
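/*
 * Example of the packing above (illustrative IDs): a device id of 0x9a49
 * with revision 0x01 yields (0x9a49 << 16) | 0x01 == 0x9a490001.
 */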
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
* and cannot be changed thereafter.
*/
static void guc_init_params(struct intel_guc *guc)
{
u32 *params = guc->params;
int i;
BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
}
/*
* Initialise the GuC parameter block before starting the firmware
* transfer. These parameters are read by the firmware on startup
* and cannot be changed thereafter.
*/
void intel_guc_write_params(struct intel_guc *guc)
{
struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
int i;
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);
intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
}
void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
{
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
u32 stamp = 0;
u64 ktime;
with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
stamp = intel_uncore_read(gt->uncore, GUCPMTIMESTAMP);
ktime = ktime_get_boottime_ns();
drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", ktime, ktime);
drm_printf(p, "GuC timestamp: 0x%08X [%u]\n", stamp, stamp);
drm_printf(p, "CS timestamp frequency: %u Hz, %u ns\n",
gt->clock_frequency, gt->clock_period_ns);
}
int intel_guc_init(struct intel_guc *guc)
{
int ret;
ret = intel_uc_fw_init(&guc->fw);
if (ret)
goto out;
ret = intel_guc_log_create(&guc->log);
if (ret)
goto err_fw;
ret = intel_guc_capture_init(guc);
if (ret)
goto err_log;
ret = intel_guc_ads_create(guc);
if (ret)
goto err_capture;
GEM_BUG_ON(!guc->ads_vma);
ret = intel_guc_ct_init(&guc->ct);
if (ret)
goto err_ads;
if (intel_guc_submission_is_used(guc)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
*/
ret = intel_guc_submission_init(guc);
if (ret)
goto err_ct;
}
if (intel_guc_slpc_is_used(guc)) {
ret = intel_guc_slpc_init(&guc->slpc);
if (ret)
goto err_submission;
}
/* now that everything is perma-pinned, initialize the parameters */
guc_init_params(guc);
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
err_submission:
intel_guc_submission_fini(guc);
err_ct:
intel_guc_ct_fini(&guc->ct);
err_ads:
intel_guc_ads_destroy(guc);
err_capture:
intel_guc_capture_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
err_fw:
intel_uc_fw_fini(&guc->fw);
out:
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
return ret;
}
void intel_guc_fini(struct intel_guc *guc)
{
if (!intel_uc_fw_is_loadable(&guc->fw))
return;
if (intel_guc_slpc_is_used(guc))
intel_guc_slpc_fini(&guc->slpc);
if (intel_guc_submission_is_used(guc))
intel_guc_submission_fini(guc);
intel_guc_ct_fini(&guc->ct);
intel_guc_ads_destroy(guc);
intel_guc_capture_destroy(guc);
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
}
/*
* This function implements the MMIO based host to GuC interface.
*/
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
u32 *response_buf, u32 response_buf_size)
{
struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 header;
int i;
int ret;
GEM_BUG_ON(!len);
GEM_BUG_ON(len > guc->send_regs.count);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);
mutex_lock(&guc->send_mutex);
intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
retry:
for (i = 0; i < len; i++)
intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
intel_guc_notify(guc);
/*
* No GuC command should ever take longer than 10ms.
* Fast commands should still complete in 10us.
*/
ret = __intel_wait_for_register_fw(uncore,
guc_send_reg(guc, 0),
GUC_HXG_MSG_0_ORIGIN,
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
GUC_HXG_ORIGIN_GUC),
10, 10, &header);
if (unlikely(ret)) {
timeout:
guc_err(guc, "mmio request %#x: no reply %x\n",
request[0], header);
goto out;
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })
ret = wait_for(done, 1000);
if (unlikely(ret))
goto timeout;
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
GUC_HXG_ORIGIN_GUC))
goto proto;
#undef done
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
request[0], reason);
goto retry;
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
guc_err(guc, "mmio request %#x: failure %x/%u\n",
request[0], error, hint);
ret = -ENXIO;
goto out;
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
request[0], header);
ret = -EPROTO;
goto out;
}
if (response_buf) {
int count = min(response_buf_size, guc->send_regs.count);
GEM_BUG_ON(!count);
response_buf[0] = header;
for (i = 1; i < count; i++)
response_buf[i] = intel_uncore_read(uncore,
guc_send_reg(guc, i));
/* Use number of copied dwords as our return value */
ret = count;
} else {
/* Use data from the GuC response as our return value */
ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
out:
intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
mutex_unlock(&guc->send_mutex);
return ret;
}
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
const u32 *payload, u32 len)
{
u32 msg;
if (unlikely(!len))
return -EPROTO;
/* Make sure to handle only enabled messages */
msg = payload[0] & guc->msg_enabled_mask;
if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
guc_err(guc, "Received early crash dump notification!\n");
if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
guc_err(guc, "Received early exception notification!\n");
return 0;
}
/**
* intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
* @guc: intel_guc structure
* @rsa_offset: rsa offset w.r.t ggtt base of huc vma
*
* Triggers a HuC firmware authentication request to the GuC via intel_guc_send
* INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
* intel_huc_auth().
*
* Return: non-zero code on error
*/
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
u32 action[] = {
INTEL_GUC_ACTION_AUTHENTICATE_HUC,
rsa_offset
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @guc: the guc
*/
int intel_guc_suspend(struct intel_guc *guc)
{
int ret;
u32 action[] = {
INTEL_GUC_ACTION_CLIENT_SOFT_RESET,
};
if (!intel_guc_is_ready(guc))
return 0;
if (intel_guc_submission_is_used(guc)) {
/*
* This H2G MMIO command tears down the GuC in two steps. First it will
* generate a G2H CTB for every active context indicating a reset. In
* practice the i915 shouldn't ever get a G2H as suspend should only be
* called when the GPU is idle. Next, it tears down the CTBs and this
* H2G MMIO command completes.
*
 * Don't abort on a failure code from the GuC. Keep going, do the
 * clean up in sanitize() and the re-initialisation on resume, and
 * hopefully the error here won't be problematic.
*/
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret)
guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
ERR_PTR(ret));
}
/* Signal that the GuC isn't running. */
intel_guc_sanitize(guc);
return 0;
}
/**
* intel_guc_resume() - notify GuC resuming from suspend state
* @guc: the guc
*/
int intel_guc_resume(struct intel_guc *guc)
{
/*
* NB: This function can still be called even if GuC submission is
* disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
 * if any code is later added here, it must support doing nothing
* if submission is disabled (as per intel_guc_suspend).
*/
return 0;
}
/**
* DOC: GuC Memory Management
*
* GuC can't allocate any memory for its own usage, so all the allocations must
* be handled by the host driver. GuC accesses the memory via the GGTT, with the
* exception of the top and bottom parts of the 4GB address space, which are
* instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM)
* or other parts of the HW. The driver must take care not to place objects that
* the GuC is going to access in these reserved ranges. The layout of the GuC
* address space is shown below:
*
* ::
*
 * +===========> +====================+ <== FFFF_FFFF
 * ^             |      Reserved      |
 * |             +====================+ <== GUC_GGTT_TOP
 * |             |                    |
 * |             |        DRAM        |
 * GuC           |                    |
 * Address +===> +====================+ <== GuC ggtt_pin_bias
 * Space   ^     |                    |
 * |       |     |                    |
 * |  GuC  |     |        GuC         |
 * | WOPCM |     |       WOPCM        |
 * |  Size |     |                    |
 * |       |     |                    |
 * v       v     |                    |
 * +=======+===> +====================+ <== 0000_0000
*
* The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
* while the upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
* to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
*/
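/*
 * Illustrative sketch (not part of the driver flow): a caller that needs a
 * GuC-accessible buffer uses the helpers defined below and releases it with
 * i915_vma_unpin_and_release(). The SZ_4K size and variable names are
 * arbitrary for the example; the pin-bias handling in intel_guc_allocate_vma()
 * keeps the object above ggtt_pin_bias as described above.
 *
 *	struct i915_vma *vma;
 *	void *vaddr;
 *	int err;
 *
 *	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
 *	if (err)
 *		return err;
 *
 *	// use vaddr from the CPU, hand i915_ggtt_offset(vma) to the GuC
 *
 *	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
 */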
/**
* intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
*
* This is a wrapper to create an object for use with the GuC. In order to
* use it inside the GuC, an object needs to be pinned for its lifetime, so we allocate
* both some backing storage and a range inside the Global GTT. We must pin
* it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because that
* range is reserved inside GuC.
*
* Return: A i915_vma if successful, otherwise an ERR_PTR.
*/
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u64 flags;
int ret;
if (HAS_LMEM(gt->i915))
obj = i915_gem_object_create_lmem(gt->i915, size,
I915_BO_ALLOC_CPU_CLEAR |
I915_BO_ALLOC_CONTIGUOUS |
I915_BO_ALLOC_PM_EARLY);
else
obj = i915_gem_object_create_shmem(gt->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
/*
* Wa_22016122933: For Media version 13.0, all Media GT shared
* memory needs to be mapped as WC on CPU side and UC (PAT
* index 2) on GPU side.
*/
if (intel_gt_needs_wa_22016122933(gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
vma = i915_vma_instance(obj, >->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err;
flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
ret = i915_ggtt_pin(vma, NULL, 0, flags);
if (ret) {
vma = ERR_PTR(ret);
goto err;
}
return i915_vma_make_unshrinkable(vma);
err:
i915_gem_object_put(obj);
return vma;
}
/**
* intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
* @out_vma: return variable for the allocated vma pointer
* @out_vaddr: return variable for the obj mapping
*
* This wrapper calls intel_guc_allocate_vma() and then maps the allocated
* object with I915_MAP_WB.
*
* Return: 0 if successful, a negative errno code otherwise.
*/
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
struct i915_vma **out_vma, void **out_vaddr)
{
struct i915_vma *vma;
void *vaddr;
vma = intel_guc_allocate_vma(guc, size);
if (IS_ERR(vma))
return PTR_ERR(vma);
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
intel_gt_coherent_map_type(guc_to_gt(guc),
vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
}
*out_vma = vma;
*out_vaddr = vaddr;
return 0;
}
static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_SELF_CFG),
FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32, lower_32_bits(value)),
FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64, upper_32_bits(value)),
};
int ret;
GEM_BUG_ON(len > 2);
GEM_BUG_ON(len == 1 && upper_32_bits(value));
/* Self config must go over MMIO */
ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
if (unlikely(ret < 0))
return ret;
if (unlikely(ret > 1))
return -EPROTO;
if (unlikely(!ret))
return -ENOKEY;
return 0;
}
static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
int err = __guc_action_self_cfg(guc, key, len, value);
if (unlikely(err))
guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
ERR_PTR(err), key, value);
return err;
}
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value)
{
return __guc_self_cfg(guc, key, 1, value);
}
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value)
{
return __guc_self_cfg(guc, key, 2, value);
}
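/*
 * Usage sketch (illustrative only): callers pick the 32- or 64-bit variant
 * based on the width of the KLV value, which maps to the len checked in
 * __guc_action_self_cfg() (1 dword vs 2 dwords). EXAMPLE_KLV_KEY is a
 * hypothetical placeholder, not a real key definition.
 *
 *	err = intel_guc_self_cfg64(guc, EXAMPLE_KLV_KEY, ggtt_addr);
 *	if (unlikely(err))
 *		return err;
 */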
/**
* intel_guc_load_status - dump information about GuC load status
* @guc: the GuC
* @p: the &drm_printer
*
* Pretty printer for GuC load status.
*/
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_uncore *uncore = gt->uncore;
intel_wakeref_t wakeref;
if (!intel_guc_is_supported(guc)) {
drm_printf(p, "GuC not supported\n");
return;
}
if (!intel_guc_is_wanted(guc)) {
drm_printf(p, "GuC disabled\n");
return;
}
intel_uc_fw_dump(&guc->fw, p);
with_intel_runtime_pm(uncore->rpm, wakeref) {
u32 status = intel_uncore_read(uncore, GUC_STATUS);
u32 i;
drm_printf(p, "GuC status 0x%08x:\n", status);
drm_printf(p, "\tBootrom status = 0x%x\n",
(status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
drm_printf(p, "\tuKernel status = 0x%x\n",
(status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
drm_printf(p, "\tMIA Core status = 0x%x\n",
(status & GS_MIA_MASK) >> GS_MIA_SHIFT);
drm_puts(p, "Scratch registers:\n");
for (i = 0; i < 16; i++) {
drm_printf(p, "\t%2d: \t0x%x\n",
i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
}
}
}
void intel_guc_write_barrier(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
/*
* Ensure intel_uncore_write_fw can be used rather than
* intel_uncore_write.
*/
GEM_BUG_ON(guc->send_regs.fw_domains);
/*
* This register is used by the i915 and GuC for MMIO based
* communication. Once we are in this code CTBs are the only
* method the i915 uses to communicate with the GuC so it is
* safe to write to this register (a value of 0 is NOP for MMIO
* communication). If we ever start mixing CTBs and MMIOs a new
* register will have to be chosen. This function is also used
* to enforce ordering of a work queue item write and an update
* to the process descriptor. When a work queue is being used,
* CTBs are also the only mechanism of communication.
*/
intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
} else {
/* wmb() sufficient for a barrier if in smem */
wmb();
}
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2019 Intel Corporation
*/
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "intel_huc.h"
#include "intel_huc_fw.h"
#include "intel_huc_print.h"
#include "i915_drv.h"
#include "pxp/intel_pxp_huc.h"
#include "pxp/intel_pxp_cmd_interface_43.h"
struct mtl_huc_auth_msg_in {
struct intel_gsc_mtl_header header;
struct pxp43_new_huc_auth_in huc_in;
} __packed;
struct mtl_huc_auth_msg_out {
struct intel_gsc_mtl_header header;
struct pxp43_huc_auth_out huc_out;
} __packed;
int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
{
struct intel_gt *gt = huc_to_gt(huc);
struct drm_i915_gem_object *obj;
struct mtl_huc_auth_msg_in *msg_in;
struct mtl_huc_auth_msg_out *msg_out;
void *pkt_vaddr;
u64 pkt_offset;
int retry = 5;
int err = 0;
if (!huc->heci_pkt)
return -ENODEV;
obj = huc->heci_pkt->obj;
pkt_offset = i915_ggtt_offset(huc->heci_pkt);
pkt_vaddr = i915_gem_object_pin_map_unlocked(obj,
intel_gt_coherent_map_type(gt, obj, true));
if (IS_ERR(pkt_vaddr))
return PTR_ERR(pkt_vaddr);
msg_in = pkt_vaddr;
msg_out = pkt_vaddr + PXP43_HUC_AUTH_INOUT_SIZE;
intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
HECI_MEADDRESS_PXP,
sizeof(*msg_in), 0);
msg_in->huc_in.header.api_version = PXP_APIVER(4, 3);
msg_in->huc_in.header.command_id = PXP43_CMDID_NEW_HUC_AUTH;
msg_in->huc_in.header.status = 0;
msg_in->huc_in.header.buffer_len = sizeof(msg_in->huc_in) -
sizeof(msg_in->huc_in.header);
msg_in->huc_in.huc_base_address = huc->fw.vma_res.start;
msg_in->huc_in.huc_size = huc->fw.obj->base.size;
do {
err = intel_gsc_uc_heci_cmd_submit_packet(>->uc.gsc,
pkt_offset, sizeof(*msg_in),
pkt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
PXP43_HUC_AUTH_INOUT_SIZE);
if (err) {
huc_err(huc, "failed to submit GSC request to auth: %d\n", err);
goto out_unpin;
}
if (msg_out->header.flags & GSC_OUTFLAG_MSG_PENDING) {
msg_in->header.gsc_message_handle = msg_out->header.gsc_message_handle;
err = -EBUSY;
msleep(50);
}
} while (--retry && err == -EBUSY);
if (err)
goto out_unpin;
if (msg_out->header.message_size != sizeof(*msg_out)) {
huc_err(huc, "invalid GSC reply length %u [expected %zu]\n",
msg_out->header.message_size, sizeof(*msg_out));
err = -EPROTO;
goto out_unpin;
}
/*
* The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
* loaded. If the same error is ever returned with HuC not loaded we'll
* still catch it when we check the authentication bit later.
*/
if (msg_out->huc_out.header.status != PXP_STATUS_SUCCESS &&
msg_out->huc_out.header.status != PXP_STATUS_OP_NOT_PERMITTED) {
huc_err(huc, "auth failed with GSC error = 0x%x\n",
msg_out->huc_out.header.status);
err = -EIO;
goto out_unpin;
}
out_unpin:
i915_gem_object_unpin_map(obj);
return err;
}
static bool css_valid(const void *data, size_t size)
{
const struct uc_css_header *css = data;
if (unlikely(size < sizeof(struct uc_css_header)))
return false;
if (css->module_type != 0x6)
return false;
if (css->module_vendor != PCI_VENDOR_ID_INTEL)
return false;
return true;
}
static inline u32 entry_offset(const struct intel_gsc_cpd_entry *entry)
{
return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
}
int intel_huc_fw_get_binary_info(struct intel_uc_fw *huc_fw, const void *data, size_t size)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
const struct intel_gsc_cpd_header_v2 *header = data;
const struct intel_gsc_cpd_entry *entry;
size_t min_size = sizeof(*header);
int i;
if (!huc_fw->has_gsc_headers) {
huc_err(huc, "Invalid FW type for GSC header parsing!\n");
return -EINVAL;
}
if (size < sizeof(*header)) {
huc_err(huc, "FW too small! %zu < %zu\n", size, min_size);
return -ENODATA;
}
/*
* The GSC-enabled HuC binary starts with a directory header, followed
* by a series of entries. Each entry is identified by a name and
* points to a specific section of the binary containing the relevant
* data. The entries we're interested in are:
* - "HUCP.man": points to the GSC manifest header for the HuC, which
* contains the version info.
* - "huc_fw": points to the legacy-style binary that can be used for
* load via the DMA. This entry only contains a valid CSS
* on binaries for platforms that support 2-step HuC load
* via dma and auth via GSC (like MTL).
*
* --------------------------------------------------
* [ intel_gsc_cpd_header_v2 ]
* --------------------------------------------------
* [ intel_gsc_cpd_entry[] ]
* [ entry1 ]
* [ ... ]
* [ entryX ]
* [ "HUCP.man" ]
* [ ... ]
* [ offset >----------------------------]------o
* [ ... ] |
* [ entryY ] |
* [ "huc_fw" ] |
* [ ... ] |
* [ offset >----------------------------]----------o
* -------------------------------------------------- | |
* | |
* -------------------------------------------------- | |
* [ intel_gsc_manifest_header ]<-----o |
* [ ... ] |
* [ intel_gsc_version fw_version ] |
* [ ... ] |
* -------------------------------------------------- |
* |
* -------------------------------------------------- |
* [ data[] ]<---------o
* [ ... ]
* [ ... ]
* --------------------------------------------------
*/
if (header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
huc_err(huc, "invalid marker for CPD header: 0x%08x!\n",
header->header_marker);
return -EINVAL;
}
/* we only have binaries with header v2 and entry v1 for now */
if (header->header_version != 2 || header->entry_version != 1) {
huc_err(huc, "invalid CPD header/entry version %u:%u!\n",
header->header_version, header->entry_version);
return -EINVAL;
}
if (header->header_length < sizeof(struct intel_gsc_cpd_header_v2)) {
huc_err(huc, "invalid CPD header length %u!\n",
header->header_length);
return -EINVAL;
}
min_size = header->header_length + sizeof(*entry) * header->num_of_entries;
if (size < min_size) {
huc_err(huc, "FW too small! %zu < %zu\n", size, min_size);
return -ENODATA;
}
entry = data + header->header_length;
for (i = 0; i < header->num_of_entries; i++, entry++) {
if (strcmp(entry->name, "HUCP.man") == 0)
intel_uc_fw_version_from_gsc_manifest(&huc_fw->file_selected.ver,
data + entry_offset(entry));
if (strcmp(entry->name, "huc_fw") == 0) {
u32 offset = entry_offset(entry);
if (offset < size && css_valid(data + offset, size - offset))
huc_fw->dma_start_offset = offset;
}
}
return 0;
}
int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc)
{
int ret;
if (!intel_huc_is_loaded_by_gsc(huc))
return -ENODEV;
if (!intel_uc_fw_is_loadable(&huc->fw))
return -ENOEXEC;
/*
* If we abort a suspend, HuC might still be loaded when the mei
* component gets re-bound and this function is called again. If so, just
* mark the HuC as loaded.
*/
if (intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
return 0;
}
GEM_WARN_ON(intel_uc_fw_is_loaded(&huc->fw));
ret = intel_pxp_huc_load_and_auth(huc_to_gt(huc)->i915->pxp);
if (ret)
return ret;
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_TRANSFERRED);
return intel_huc_wait_for_auth_complete(huc, INTEL_HUC_AUTH_BY_GSC);
}
/**
* intel_huc_fw_upload() - load HuC uCode to device via DMA transfer
* @huc: intel_huc structure
*
* Called from intel_uc_init_hw() during driver load, resume from sleep and
* after a GPU reset. Note that HuC must be loaded before GuC.
*
* The firmware image should have already been fetched into memory, so only
* check that fetch succeeded, and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
int intel_huc_fw_upload(struct intel_huc *huc)
{
if (intel_huc_is_loaded_by_gsc(huc))
return -ENODEV;
/* HW doesn't look at destination address for HuC, so set it to 0 */
return intel_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_ring.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
struct gsc_heci_pkt {
u64 addr_in;
u32 size_in;
u64 addr_out;
u32 size_out;
};
static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt)
{
u32 *cs;
cs = intel_ring_begin(rq, 8);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GSC_HECI_CMD_PKT;
*cs++ = lower_32_bits(pkt->addr_in);
*cs++ = upper_32_bits(pkt->addr_in);
*cs++ = pkt->size_in;
*cs++ = lower_32_bits(pkt->addr_out);
*cs++ = upper_32_bits(pkt->addr_out);
*cs++ = pkt->size_out;
*cs++ = 0;
intel_ring_advance(rq, cs);
return 0;
}
int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, u64 addr_in,
u32 size_in, u64 addr_out,
u32 size_out)
{
struct intel_context *ce = gsc->ce;
struct i915_request *rq;
struct gsc_heci_pkt pkt = {
.addr_in = addr_in,
.size_in = size_in,
.addr_out = addr_out,
.size_out = size_out
};
int err;
if (!ce)
return -ENODEV;
rq = i915_request_create(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
if (ce->engine->emit_init_breadcrumb) {
err = ce->engine->emit_init_breadcrumb(rq);
if (err)
goto out_rq;
}
err = emit_gsc_heci_pkt(rq, &pkt);
if (err)
goto out_rq;
err = ce->engine->emit_flush(rq, 0);
out_rq:
i915_request_get(rq);
if (unlikely(err))
i915_request_set_error_once(rq, err);
i915_request_add(rq);
if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
"Request submission for GSC heci cmd failed (%d)\n",
err);
return err;
}
void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header,
u8 heci_client_id, u32 message_size,
u64 host_session_id)
{
host_session_id &= ~HOST_SESSION_MASK;
if (host_session_id && heci_client_id == HECI_MEADDRESS_PXP)
host_session_id |= HOST_SESSION_PXP_SINGLE;
header->validity_marker = GSC_HECI_VALIDITY_MARKER;
header->heci_client_id = heci_client_id;
header->host_session_handle = host_session_id;
header->header_version = MTL_GSC_HEADER_VERSION;
header->message_size = message_size;
}
static void
emit_gsc_heci_pkt_nonpriv(u32 *cmd, struct intel_gsc_heci_non_priv_pkt *pkt)
{
*cmd++ = GSC_HECI_CMD_PKT;
*cmd++ = lower_32_bits(pkt->addr_in);
*cmd++ = upper_32_bits(pkt->addr_in);
*cmd++ = pkt->size_in;
*cmd++ = lower_32_bits(pkt->addr_out);
*cmd++ = upper_32_bits(pkt->addr_out);
*cmd++ = pkt->size_out;
*cmd++ = 0;
*cmd++ = MI_BATCH_BUFFER_END;
}
int
intel_gsc_uc_heci_cmd_submit_nonpriv(struct intel_gsc_uc *gsc,
struct intel_context *ce,
struct intel_gsc_heci_non_priv_pkt *pkt,
u32 *cmd, int timeout_ms)
{
struct intel_engine_cs *engine;
struct i915_gem_ww_ctx ww;
struct i915_request *rq;
int err, trials = 0;
i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_gem_object_lock(pkt->bb_vma->obj, &ww);
if (err)
goto out_ww;
err = i915_gem_object_lock(pkt->heci_pkt_vma->obj, &ww);
if (err)
goto out_ww;
err = intel_context_pin_ww(ce, &ww);
if (err)
goto out_ww;
rq = i915_request_create(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_unpin_ce;
}
emit_gsc_heci_pkt_nonpriv(cmd, pkt);
err = i915_vma_move_to_active(pkt->bb_vma, rq, 0);
if (err)
goto out_rq;
err = i915_vma_move_to_active(pkt->heci_pkt_vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto out_rq;
engine = rq->context->engine;
if (engine->emit_init_breadcrumb) {
err = engine->emit_init_breadcrumb(rq);
if (err)
goto out_rq;
}
err = engine->emit_bb_start(rq, i915_vma_offset(pkt->bb_vma), PAGE_SIZE, 0);
if (err)
goto out_rq;
err = ce->engine->emit_flush(rq, 0);
if (err)
drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
"Failed emit-flush for gsc-heci-non-priv-pkterr=%d\n", err);
out_rq:
i915_request_get(rq);
if (unlikely(err))
i915_request_set_error_once(rq, err);
i915_request_add(rq);
if (!err) {
if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
msecs_to_jiffies(timeout_ms)) < 0)
err = -ETIME;
}
i915_request_put(rq);
out_unpin_ce:
intel_context_unpin(ce);
out_ww:
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err) {
if (++trials < 10)
goto retry;
else
err = -EAGAIN;
}
}
i915_gem_ww_ctx_fini(&ww);
return err;
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/component.h>
#include <drm/i915_component.h>
#include <drm/i915_gsc_proxy_mei_interface.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
#include "i915_reg.h"
/*
* GSC proxy:
* The GSC uC needs to communicate with the CSME to perform certain operations.
* Since the GSC can't perform this communication directly on platforms where it
* is integrated in GT, i915 needs to transfer the messages from GSC to CSME
* and back. i915 must manually start the proxy flow after the GSC is loaded to
* signal to GSC that we're ready to handle its messages and allow it to query
* its init data from CSME; GSC will then trigger an HECI2 interrupt if it needs
* to send messages to CSME again.
* The proxy flow is as follow:
* 1 - i915 submits a request to GSC asking for the message to CSME
* 2 - GSC replies with the proxy header + payload for CSME
* 3 - i915 sends the reply from GSC as-is to CSME via the mei proxy component
* 4 - CSME replies with the proxy header + payload for GSC
* 5 - i915 submits a request to GSC with the reply from CSME
* 6 - GSC replies either with a new header + payload (same as step 2, so we
* restart from there) or with an end message.
*/
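/*
 * A rough sketch of that flow as implemented by proxy_query() further below
 * (simplified, error handling omitted):
 *
 *	send QUERY to GSC				// step 1
 *	while (GSC reply is not an END message) {	// steps 2 and 6
 *		forward GSC reply to CSME		// step 3
 *		forward CSME reply to GSC		// steps 4-5
 *	}
 */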
/*
* The component should load quite quickly in most cases, but it could take
* a bit. Using a very big timeout just to cover the worst-case scenario.
*/
#define GSC_PROXY_INIT_TIMEOUT_MS 20000
/* the protocol supports up to 32K in each direction */
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)
#define GSC_PROXY_MAX_MSG_SIZE (GSC_PROXY_BUFFER_SIZE - sizeof(struct intel_gsc_mtl_header))
/* FW-defined proxy header */
struct intel_gsc_proxy_header {
/*
* hdr:
* Bits 0-7: type of the proxy message (see enum intel_gsc_proxy_type)
* Bits 8-15: rsvd
* Bits 16-31: length in bytes of the payload following the proxy header
*/
u32 hdr;
#define GSC_PROXY_TYPE GENMASK(7, 0)
#define GSC_PROXY_PAYLOAD_LENGTH GENMASK(31, 16)
u32 source; /* Source of the Proxy message */
u32 destination; /* Destination of the Proxy message */
#define GSC_PROXY_ADDRESSING_KMD 0x10000
#define GSC_PROXY_ADDRESSING_GSC 0x20000
#define GSC_PROXY_ADDRESSING_CSME 0x30000
u32 status; /* Command status */
} __packed;
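/*
 * Packing/unpacking sketch for the hdr field above; these are the same
 * FIELD_PREP()/FIELD_GET() patterns used by proxy_query() and
 * validate_proxy_header() below, shown here only to illustrate the layout:
 *
 *	hdr = FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
 *	      FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, payload_len);
 *	type = FIELD_GET(GSC_PROXY_TYPE, hdr);
 */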
/* FW-defined proxy types */
enum intel_gsc_proxy_type {
GSC_PROXY_MSG_TYPE_PROXY_INVALID = 0,
GSC_PROXY_MSG_TYPE_PROXY_QUERY = 1,
GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD = 2,
GSC_PROXY_MSG_TYPE_PROXY_END = 3,
GSC_PROXY_MSG_TYPE_PROXY_NOTIFICATION = 4,
};
struct gsc_proxy_msg {
struct intel_gsc_mtl_header header;
struct intel_gsc_proxy_header proxy_header;
} __packed;
static int proxy_send_to_csme(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct i915_gsc_proxy_component *comp = gsc->proxy.component;
struct intel_gsc_mtl_header *hdr;
void *in = gsc->proxy.to_csme;
void *out = gsc->proxy.to_gsc;
u32 in_size;
int ret;
/* CSME msg only includes the proxy */
hdr = in;
in += sizeof(struct intel_gsc_mtl_header);
out += sizeof(struct intel_gsc_mtl_header);
in_size = hdr->message_size - sizeof(struct intel_gsc_mtl_header);
/* the message must contain at least the proxy header */
if (in_size < sizeof(struct intel_gsc_proxy_header) ||
in_size > GSC_PROXY_MAX_MSG_SIZE) {
gt_err(gt, "Invalid CSME message size: %u\n", in_size);
return -EINVAL;
}
ret = comp->ops->send(comp->mei_dev, in, in_size);
if (ret < 0) {
gt_err(gt, "Failed to send CSME message\n");
return ret;
}
ret = comp->ops->recv(comp->mei_dev, out, GSC_PROXY_MAX_MSG_SIZE);
if (ret < 0) {
gt_err(gt, "Failed to receive CSME message\n");
return ret;
}
return ret;
}
static int proxy_send_to_gsc(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
u32 *marker = gsc->proxy.to_csme; /* first dw of the reply header */
u64 addr_in = i915_ggtt_offset(gsc->proxy.vma);
u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
u32 size = ((struct gsc_proxy_msg *)gsc->proxy.to_gsc)->header.message_size;
int err;
/* the message must contain at least the gsc and proxy headers */
if (size < sizeof(struct gsc_proxy_msg) || size > GSC_PROXY_BUFFER_SIZE) {
gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
return -EINVAL;
}
/* clear the message marker */
*marker = 0;
/* make sure the marker write is flushed */
wmb();
/* send the request */
err = intel_gsc_uc_heci_cmd_submit_packet(gsc, addr_in, size,
addr_out, GSC_PROXY_BUFFER_SIZE);
if (!err) {
/* wait for the reply to show up */
err = wait_for(*marker != 0, 300);
if (err)
gt_err(gt, "Failed to get a proxy reply from gsc\n");
}
return err;
}
static int validate_proxy_header(struct intel_gsc_proxy_header *header,
u32 source, u32 dest)
{
u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
int ret = 0;
if (header->destination != dest || header->source != source) {
ret = -ENOEXEC;
goto fail;
}
switch (type) {
case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
if (length > 0)
break;
fallthrough;
case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
ret = -EIO;
goto fail;
default:
break;
}
fail:
return ret;
}
static int proxy_query(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct gsc_proxy_msg *to_gsc = gsc->proxy.to_gsc;
struct gsc_proxy_msg *to_csme = gsc->proxy.to_csme;
int ret;
intel_gsc_uc_heci_cmd_emit_mtl_header(&to_gsc->header,
HECI_MEADDRESS_PROXY,
sizeof(struct gsc_proxy_msg),
0);
to_gsc->proxy_header.hdr =
FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0);
to_gsc->proxy_header.source = GSC_PROXY_ADDRESSING_KMD;
to_gsc->proxy_header.destination = GSC_PROXY_ADDRESSING_GSC;
to_gsc->proxy_header.status = 0;
while (1) {
/* clear the GSC response header space */
memset(gsc->proxy.to_csme, 0, sizeof(struct gsc_proxy_msg));
/* send proxy message to GSC */
ret = proxy_send_to_gsc(gsc);
if (ret) {
gt_err(gt, "failed to send proxy message to GSC! %d\n", ret);
goto proxy_error;
}
/* stop if this was the last message */
if (FIELD_GET(GSC_PROXY_TYPE, to_csme->proxy_header.hdr) ==
GSC_PROXY_MSG_TYPE_PROXY_END)
break;
/* make sure the GSC-to-CSME proxy header is sane */
ret = validate_proxy_header(&to_csme->proxy_header,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_ADDRESSING_CSME);
if (ret) {
gt_err(gt, "invalid GSC to CSME proxy header! %d\n", ret);
goto proxy_error;
}
/* send the GSC message to the CSME */
ret = proxy_send_to_csme(gsc);
if (ret < 0) {
gt_err(gt, "failed to send proxy message to CSME! %d\n", ret);
goto proxy_error;
}
/* update the GSC message size with the returned value from CSME */
to_gsc->header.message_size = ret + sizeof(struct intel_gsc_mtl_header);
/* make sure the CSME-to-GSC proxy header is sane */
ret = validate_proxy_header(&to_gsc->proxy_header,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_ADDRESSING_GSC);
if (ret) {
gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
goto proxy_error;
}
}
proxy_error:
return ret < 0 ? ret : 0;
}
int intel_gsc_proxy_request_handler(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
int err;
if (!gsc->proxy.component_added)
return -ENODEV;
assert_rpm_wakelock_held(gt->uncore->rpm);
/* when GSC is loaded, we can queue this before the component is bound */
err = wait_for(gsc->proxy.component, GSC_PROXY_INIT_TIMEOUT_MS);
if (err) {
gt_err(gt, "GSC proxy component didn't bind within the expected timeout\n");
return -EIO;
}
mutex_lock(&gsc->proxy.mutex);
if (!gsc->proxy.component) {
gt_err(gt, "GSC proxy worker called without the component being bound!\n");
err = -EIO;
} else {
/*
* write the status bit to clear it and allow new proxy
* interrupts to be generated while we handle the current
* request, but be sure not to write the reset bit
*/
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_RST, HECI_H_CSR_IS);
err = proxy_query(gsc);
}
mutex_unlock(&gsc->proxy.mutex);
return err;
}
void intel_gsc_proxy_irq_handler(struct intel_gsc_uc *gsc, u32 iir)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
if (unlikely(!iir))
return;
lockdep_assert_held(gt->irq_lock);
if (!gsc->proxy.component) {
gt_err(gt, "GSC proxy irq received without the component being bound!\n");
return;
}
gsc->gsc_work_actions |= GSC_ACTION_SW_PROXY;
queue_work(gsc->wq, &gsc->work);
}
static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
struct device *mei_kdev, void *data)
{
struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = >->uc.gsc;
intel_wakeref_t wakeref;
/* enable HECI2 IRQs */
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_RST, HECI_H_CSR_IE);
mutex_lock(&gsc->proxy.mutex);
gsc->proxy.component = data;
gsc->proxy.component->mei_dev = mei_kdev;
mutex_unlock(&gsc->proxy.mutex);
return 0;
}
static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
struct device *mei_kdev, void *data)
{
struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
struct intel_gt *gt = i915->media_gt;
struct intel_gsc_uc *gsc = >->uc.gsc;
intel_wakeref_t wakeref;
mutex_lock(&gsc->proxy.mutex);
gsc->proxy.component = NULL;
mutex_unlock(&gsc->proxy.mutex);
/* disable HECI2 IRQs */
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
}
static const struct component_ops i915_gsc_proxy_component_ops = {
.bind = i915_gsc_proxy_component_bind,
.unbind = i915_gsc_proxy_component_unbind,
};
static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct i915_vma *vma;
void *vaddr;
int err;
err = intel_guc_allocate_and_map_vma(>->uc.guc, GSC_PROXY_CHANNEL_SIZE,
&vma, &vaddr);
if (err)
return err;
gsc->proxy.vma = vma;
gsc->proxy.to_gsc = vaddr;
gsc->proxy.to_csme = vaddr + GSC_PROXY_BUFFER_SIZE;
return 0;
}
static void proxy_channel_free(struct intel_gsc_uc *gsc)
{
if (!gsc->proxy.vma)
return;
gsc->proxy.to_gsc = NULL;
gsc->proxy.to_csme = NULL;
i915_vma_unpin_and_release(&gsc->proxy.vma, I915_VMA_RELEASE_MAP);
}
void intel_gsc_proxy_fini(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct drm_i915_private *i915 = gt->i915;
if (fetch_and_zero(&gsc->proxy.component_added))
component_del(i915->drm.dev, &i915_gsc_proxy_component_ops);
proxy_channel_free(gsc);
}
int intel_gsc_proxy_init(struct intel_gsc_uc *gsc)
{
int err;
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct drm_i915_private *i915 = gt->i915;
mutex_init(&gsc->proxy.mutex);
if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
gt_info(gt, "can't init GSC proxy due to missing mei component\n");
return -ENODEV;
}
err = proxy_channel_alloc(gsc);
if (err)
return err;
err = component_add_typed(i915->drm.dev, &i915_gsc_proxy_component_ops,
I915_COMPONENT_GSC_PROXY);
if (err < 0) {
gt_err(gt, "Failed to add GSC_PROXY component (%d)\n", err);
goto out_free;
}
gsc->proxy.component_added = true;
return 0;
out_free:
proxy_channel_free(gsc);
return err;
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include "gt/intel_gt_print.h"
#include "intel_guc_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/intel_scheduler_helpers.h"
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
int err = 0;
i915_request_get(rq);
i915_request_add(rq);
if (spin && !igt_wait_for_spinner(spin, rq))
err = -ETIMEDOUT;
return err;
}
static struct i915_request *nop_user_request(struct intel_context *ce,
struct i915_request *from)
{
struct i915_request *rq;
int ret;
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
if (from) {
ret = i915_sw_fence_await_dma_fence(&rq->submit,
&from->fence, 0,
I915_FENCE_GFP);
if (ret < 0) {
i915_request_put(rq);
return ERR_PTR(ret);
}
}
i915_request_get(rq);
i915_request_add(rq);
return rq;
}
static int intel_guc_scrub_ctbs(void *arg)
{
struct intel_gt *gt = arg;
int ret = 0;
int i;
struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine;
struct intel_context *ce;
if (!intel_has_gpu_reset(gt))
return 0;
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine = intel_selftest_find_any_engine(gt);
/* Submit requests and inject errors forcing G2H to be dropped */
for (i = 0; i < 3; ++i) {
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
gt_err(gt, "Failed to create context %d: %pe\n", i, ce);
goto err;
}
switch (i) {
case 0:
ce->drop_schedule_enable = true;
break;
case 1:
ce->drop_schedule_disable = true;
break;
case 2:
ce->drop_deregister = true;
break;
}
rq = nop_user_request(ce, NULL);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
gt_err(gt, "Failed to create request %d: %pe\n", i, rq);
goto err;
}
last[i] = rq;
}
for (i = 0; i < 3; ++i) {
ret = i915_request_wait(last[i], 0, HZ);
if (ret < 0) {
gt_err(gt, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
i915_request_put(last[i]);
last[i] = NULL;
}
/* Force all H2G / G2H to be submitted / processed */
intel_gt_retire_requests(gt);
msleep(500);
/* Scrub missing G2H */
intel_gt_handle_error(engine->gt, -1, 0, "selftest reset");
/* GT will not idle if G2H are lost */
ret = intel_gt_wait_for_idle(gt, HZ);
if (ret < 0) {
gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err;
}
err:
for (i = 0; i < 3; ++i)
if (last[i])
i915_request_put(last[i]);
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return ret;
}
/*
* intel_guc_steal_guc_ids - Test to exhaust all guc_ids and then steal one
*
* This test creates a spinner which is used to block all subsequent submissions
* until it completes. Next, a loop creates a context and a NOP request each
* iteration until the guc_ids are exhausted (request creation returns -EAGAIN).
* The spinner is ended, unblocking all requests created in the loop. At this
* point all guc_ids are exhausted but are available to steal. Try to create
* another request which should successfully steal a guc_id. Wait on last
* request to complete, idle GPU, verify a guc_id was stolen via a counter, and
* exit the test. Test also artificially reduces the number of guc_ids so the
* test runs in a timely manner.
*/
static int intel_guc_steal_guc_ids(void *arg)
{
struct intel_gt *gt = arg;
struct intel_guc *guc = >->uc.guc;
int ret, sv, context_index = 0;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine;
struct intel_context **ce;
struct igt_spinner spin;
struct i915_request *spin_rq = NULL, *rq, *last = NULL;
int number_guc_id_stolen = guc->number_guc_id_stolen;
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
guc_err(guc, "Context array allocation failed\n");
return -ENOMEM;
}
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
engine = intel_selftest_find_any_engine(gt);
sv = guc->submission_state.num_guc_ids;
guc->submission_state.num_guc_ids = 512;
/* Create spinner to block requests in below loop */
ce[context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
ce[context_index] = NULL;
goto err_wakeref;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
guc_err(guc, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err_contexts;
}
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
MI_ARB_CHECK);
if (IS_ERR(spin_rq)) {
ret = PTR_ERR(spin_rq);
guc_err(guc, "Failed to create spinner request: %pe\n", spin_rq);
goto err_contexts;
}
ret = request_add_spin(spin_rq, &spin);
if (ret) {
guc_err(guc, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Use all guc_ids */
while (ret != -EAGAIN) {
ce[++context_index] = intel_context_create(engine);
if (IS_ERR(ce[context_index])) {
ret = PTR_ERR(ce[context_index]);
guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
ce[context_index--] = NULL;
goto err_spin_rq;
}
rq = nop_user_request(ce[context_index], spin_rq);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
if ((ret != -EAGAIN) || !last) {
guc_err(guc, "Failed to create %srequest %d: %pe\n",
last ? "" : "first ", context_index, ERR_PTR(ret));
goto err_spin_rq;
}
} else {
if (last)
i915_request_put(last);
last = rq;
}
}
/* Release blocked requests */
igt_spinner_end(&spin);
ret = intel_selftest_wait_for_rq(spin_rq);
if (ret) {
guc_err(guc, "Spin request failed to complete: %pe\n", ERR_PTR(ret));
i915_request_put(last);
goto err_spin_rq;
}
i915_request_put(spin_rq);
igt_spinner_fini(&spin);
spin_rq = NULL;
/* Wait for last request */
ret = i915_request_wait(last, 0, HZ * 30);
i915_request_put(last);
if (ret < 0) {
guc_err(guc, "Last request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Try to steal guc_id */
rq = nop_user_request(ce[context_index], NULL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
guc_err(guc, "Failed to steal guc_id %d: %pe\n", context_index, rq);
goto err_spin_rq;
}
/* Wait for request with stolen guc_id */
ret = i915_request_wait(rq, 0, HZ);
i915_request_put(rq);
if (ret < 0) {
guc_err(guc, "Request with stolen guc_id failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Wait for idle */
ret = intel_gt_wait_for_idle(gt, HZ * 30);
if (ret < 0) {
guc_err(guc, "GT failed to idle: %pe\n", ERR_PTR(ret));
goto err_spin_rq;
}
/* Verify a guc_id was stolen */
if (guc->number_guc_id_stolen == number_guc_id_stolen) {
guc_err(guc, "No guc_id was stolen");
ret = -EINVAL;
} else {
ret = 0;
}
err_spin_rq:
if (spin_rq) {
igt_spinner_end(&spin);
intel_selftest_wait_for_rq(spin_rq);
i915_request_put(spin_rq);
igt_spinner_fini(&spin);
intel_gt_wait_for_idle(gt, HZ * 30);
}
err_contexts:
for (; context_index >= 0 && ce[context_index]; --context_index)
intel_context_put(ce[context_index]);
err_wakeref:
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
kfree(ce);
guc->submission_state.num_guc_ids = sv;
return ret;
}
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
SUBTEST(intel_guc_steal_guc_ids),
};
struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
if (!intel_uc_uses_guc_submission(>->uc))
return 0;
return intel_gt_live_subtests(tests, gt);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/selftest_guc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016-2019 Intel Corporation
*/
#include <linux/types.h>
#include "gt/intel_gt.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "pxp/intel_pxp_cmd_interface_43.h"
#include <linux/device/bus.h>
#include <linux/mei_aux.h>
/**
* DOC: HuC
*
* The HuC is a dedicated microcontroller for usage in media HEVC (High
* Efficiency Video Coding) operations. Userspace can directly use the firmware
* capabilities by adding HuC specific commands to batch buffers.
*
* The kernel driver is only responsible for loading the HuC firmware and
* triggering its security authentication. This is done differently depending
* on the platform:
*
* - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
* and the authentication via GuC
* - DG2: load and authentication are both performed via GSC.
* - MTL and newer platforms: the load is performed via DMA (same as with
* non-DG2 older platforms), while the authentication is done in 2 steps,
* a first auth for clear-media workloads via GuC and a second one for all
* workloads via GSC.
*
* On platforms where the GuC does the authentication, to correctly do so the
* HuC binary must be loaded before the GuC one.
* Loading the HuC is optional; however, not using the HuC might negatively
* impact power usage and/or performance of media workloads, depending on the
* use-cases.
* HuC must be reloaded on events that cause the WOPCM to lose its contents
* (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT
* reset, while on newer ones it will survive that.
*
* See https://github.com/intel/media-driver for the latest details on HuC
* functionality.
*/
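/*
 * Illustrative call sequence (a sketch, not the authoritative flow) for the
 * 2-step authentication described above, using the entry points defined in
 * this file; the real sequencing lives in the uC init/load paths:
 *
 *	err = intel_huc_auth(huc, INTEL_HUC_AUTH_BY_GUC);	// clear media
 *	...
 *	err = intel_huc_auth(huc, INTEL_HUC_AUTH_BY_GSC);	// all workloads
 */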
/**
* DOC: HuC Memory Management
*
* Similarly to the GuC, the HuC can't do any memory allocations on its own,
* with the difference being that the allocations for HuC usage are handled by
* the userspace driver instead of the kernel one. The HuC accesses the memory
* via the PPGTT belonging to the context loaded on the VCS executing the
* HuC-specific commands.
*/
/*
* MEI-GSC load is an async process. The probing of the exposed aux device
* (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
* on when the kernel schedules it. Unless something goes terribly wrong, we're
* guaranteed that this will happen during boot, so the big timeout is a safety net
* that we never expect to need.
* MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
* and/or reset, this can take longer. Note that the kernel might schedule
* other work between the i915 init/resume and the MEI one, which can add to
* the delay.
*/
#define GSC_INIT_TIMEOUT_MS 10000
#define PXP_INIT_TIMEOUT_MS 5000
static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
}
static void __delayed_huc_load_complete(struct intel_huc *huc)
{
if (!i915_sw_fence_done(&huc->delayed_load.fence))
i915_sw_fence_complete(&huc->delayed_load.fence);
}
static void delayed_huc_load_complete(struct intel_huc *huc)
{
hrtimer_cancel(&huc->delayed_load.timer);
__delayed_huc_load_complete(huc);
}
static void __gsc_init_error(struct intel_huc *huc)
{
huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
__delayed_huc_load_complete(huc);
}
static void gsc_init_error(struct intel_huc *huc)
{
hrtimer_cancel(&huc->delayed_load.timer);
__gsc_init_error(huc);
}
static void gsc_init_done(struct intel_huc *huc)
{
hrtimer_cancel(&huc->delayed_load.timer);
/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
if (!i915_sw_fence_done(&huc->delayed_load.fence))
hrtimer_start(&huc->delayed_load.timer,
ms_to_ktime(PXP_INIT_TIMEOUT_MS),
HRTIMER_MODE_REL);
}
static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
{
struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);
if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
huc_notice(huc, "timed out waiting for MEI GSC\n");
else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
huc_notice(huc, "timed out waiting for MEI PXP\n");
else
MISSING_CASE(huc->delayed_load.status);
__gsc_init_error(huc);
}
return HRTIMER_NORESTART;
}
static void huc_delayed_load_start(struct intel_huc *huc)
{
ktime_t delay;
GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC));
/*
* On resume we don't have to wait for MEI-GSC to be re-probed, but we
* do need to wait for MEI-PXP to reset & re-bind
*/
switch (huc->delayed_load.status) {
case INTEL_HUC_WAITING_ON_GSC:
delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
break;
case INTEL_HUC_WAITING_ON_PXP:
delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
break;
default:
gsc_init_error(huc);
return;
}
/*
* This fence is always complete unless we're waiting for the
* GSC device to come up to load the HuC. We arm the fence here
* and complete it when we confirm that the HuC is loaded from
* the PXP bind callback.
*/
GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
i915_sw_fence_fini(&huc->delayed_load.fence);
i915_sw_fence_reinit(&huc->delayed_load.fence);
i915_sw_fence_await(&huc->delayed_load.fence);
i915_sw_fence_commit(&huc->delayed_load.fence);
hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}
static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct device *dev = data;
struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];
if (!intf->adev || &intf->adev->aux_dev.dev != dev)
return 0;
switch (action) {
case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
gsc_init_done(huc);
break;
case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
huc_info(huc, "MEI driver not bound, disabling load\n");
gsc_init_error(huc);
break;
}
return 0;
}
void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
int ret;
if (!intel_huc_is_loaded_by_gsc(huc))
return;
huc->delayed_load.nb.notifier_call = gsc_notifier;
ret = bus_register_notifier(bus, &huc->delayed_load.nb);
if (ret) {
huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
huc->delayed_load.nb.notifier_call = NULL;
gsc_init_error(huc);
}
}
void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
if (!huc->delayed_load.nb.notifier_call)
return;
delayed_huc_load_complete(huc);
bus_unregister_notifier(bus, &huc->delayed_load.nb);
huc->delayed_load.nb.notifier_call = NULL;
}
static void delayed_huc_load_init(struct intel_huc *huc)
{
/*
* Initialize fence to be complete as this is expected to be complete
* unless there is a delayed HuC load in progress.
*/
i915_sw_fence_init(&huc->delayed_load.fence,
sw_fence_dummy_notify);
i915_sw_fence_commit(&huc->delayed_load.fence);
hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
}
static void delayed_huc_load_fini(struct intel_huc *huc)
{
/*
* the fence is initialized in init_early, so we need to clean it up
* even if HuC loading is off.
*/
delayed_huc_load_complete(huc);
i915_sw_fence_fini(&huc->delayed_load.fence);
}
int intel_huc_sanitize(struct intel_huc *huc)
{
delayed_huc_load_complete(huc);
intel_uc_fw_sanitize(&huc->fw);
return 0;
}
static bool vcs_supported(struct intel_gt *gt)
{
intel_engine_mask_t mask = gt->info.engine_mask;
/*
* We reach here from i915_driver_early_probe for the primary GT before
* its engine mask is set, so we use the device info engine mask for it;
* this means we're not taking VCS fusing into account, but if the
* primary GT supports VCS engines we expect at least one of them to
* remain unfused so we're fine.
* For other GTs we expect the GT-specific mask to be set before we
* call this function.
*/
GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
if (gt_is_root(gt))
mask = INTEL_INFO(gt->i915)->platform_engine_mask;
else
mask = gt->info.engine_mask;
return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
}
void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
struct intel_gt *gt = huc_to_gt(huc);
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, true);
/*
* we always init the fence as already completed, even if HuC is not
* supported. This way we don't have to distinguish between HuC not
* supported/disabled or already loaded, and can focus on whether the load
* is currently in progress (fence not complete) or not, which is what
* we care about for stalling userspace submissions.
*/
delayed_huc_load_init(huc);
if (!vcs_supported(gt)) {
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
return;
}
if (GRAPHICS_VER(i915) >= 11) {
huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL;
huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL;
} else {
huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2;
huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED;
huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED;
}
if (IS_DG2(i915)) {
huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
} else {
huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
}
}
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
struct intel_gt *gt = huc_to_gt(huc);
bool gsc_enabled = huc->fw.has_gsc_headers;
/*
* The fuse for HuC load via GSC is only valid on platforms that have
* GuC deprivilege.
*/
if (HAS_GUC_DEPRIVILEGE(gt->i915))
huc->loaded_via_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
GSC_LOADS_HUC;
if (huc->loaded_via_gsc && !gsc_enabled) {
huc_err(huc, "HW requires a GSC-enabled blob, but we found a legacy one\n");
return -ENOEXEC;
}
/*
* On newer platforms we have GSC-enabled binaries but we load the HuC
* via DMA. To do so we need to find the location of the legacy-style
* binary inside the GSC-enabled one, which we do at fetch time. Make
* sure that we were able to do so if the fuse says we need to load via
* DMA and the binary is GSC-enabled.
*/
if (!huc->loaded_via_gsc && gsc_enabled && !huc->fw.dma_start_offset) {
huc_err(huc, "HW in DMA mode, but we have an incompatible GSC-enabled blob\n");
return -ENOEXEC;
}
/*
* If the HuC is loaded via GSC, we need to be able to access the GSC.
* On DG2 this is done via the mei components, while on newer platforms
* it is done via the GSCCS.
*/
if (huc->loaded_via_gsc) {
if (IS_DG2(gt->i915)) {
if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP) ||
!IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
huc_info(huc, "can't load due to missing mei modules\n");
return -EIO;
}
} else {
if (!HAS_ENGINE(gt, GSC0)) {
huc_info(huc, "can't load due to missing GSCCS\n");
return -EIO;
}
}
}
huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(huc->loaded_via_gsc));
return 0;
}
int intel_huc_init(struct intel_huc *huc)
{
struct intel_gt *gt = huc_to_gt(huc);
int err;
err = check_huc_loading_mode(huc);
if (err)
goto out;
if (HAS_ENGINE(gt, GSC0)) {
struct i915_vma *vma;
vma = intel_guc_allocate_vma(>->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
huc_info(huc, "Failed to allocate heci pkt\n");
goto out;
}
huc->heci_pkt = vma;
}
err = intel_uc_fw_init(&huc->fw);
if (err)
goto out_pkt;
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
out_pkt:
if (huc->heci_pkt)
i915_vma_unpin_and_release(&huc->heci_pkt, 0);
out:
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
/*
* the fence is initialized in init_early, so we need to clean it up
* even if HuC loading is off.
*/
delayed_huc_load_fini(huc);
if (huc->heci_pkt)
i915_vma_unpin_and_release(&huc->heci_pkt, 0);
if (intel_uc_fw_is_loadable(&huc->fw))
intel_uc_fw_fini(&huc->fw);
}
void intel_huc_suspend(struct intel_huc *huc)
{
if (!intel_uc_fw_is_loadable(&huc->fw))
return;
/*
* in the unlikely case that we're suspending before the GSC has
* completed its loading sequence, just stop waiting. We'll restart
* on resume.
*/
delayed_huc_load_complete(huc);
}
static const char *auth_mode_string(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC;
return partial ? "clear media" : "all workloads";
}
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
int ret;
ret = __intel_wait_for_register(gt->uncore,
huc->status[type].reg,
huc->status[type].mask,
huc->status[type].value,
2, 50, NULL);
/* mark the load process as complete even if the wait failed */
delayed_huc_load_complete(huc);
if (ret) {
huc_err(huc, "firmware not verified for %s: %pe\n",
auth_mode_string(huc, type), ERR_PTR(ret));
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type));
return 0;
}
/**
* intel_huc_auth() - Authenticate HuC uCode
* @huc: intel_huc structure
* @type: authentication type (via GuC or via GSC)
*
* Called after HuC and GuC firmware loading during intel_uc_init_hw().
*
* This function invokes the GuC action to authenticate the HuC firmware,
* passing the offset of the RSA signature to intel_guc_auth_huc(). It then
* waits for up to 50ms for firmware verification ACK.
*/
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
struct intel_guc *guc = >->uc.guc;
int ret;
if (!intel_uc_fw_is_loaded(&huc->fw))
return -ENOEXEC;
/* GSC will do the auth with the load */
if (intel_huc_is_loaded_by_gsc(huc))
return -ENODEV;
if (intel_huc_is_authenticated(huc, type))
return -EEXIST;
ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
goto fail;
switch (type) {
case INTEL_HUC_AUTH_BY_GUC:
ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
break;
case INTEL_HUC_AUTH_BY_GSC:
ret = intel_huc_fw_auth_via_gsccs(huc);
break;
default:
MISSING_CASE(type);
ret = -EINVAL;
}
if (ret)
goto fail;
/* Check authentication status, it should be done by now */
ret = intel_huc_wait_for_auth_complete(huc, type);
if (ret)
goto fail;
return 0;
fail:
huc_probe_error(huc, "%s authentication failed %pe\n",
auth_mode_string(huc, type), ERR_PTR(ret));
return ret;
}
bool intel_huc_is_authenticated(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
intel_wakeref_t wakeref;
u32 status = 0;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
status = intel_uncore_read(gt->uncore, huc->status[type].reg);
return (status & huc->status[type].mask) == huc->status[type].value;
}
static bool huc_is_fully_authenticated(struct intel_huc *huc)
{
struct intel_uc_fw *huc_fw = &huc->fw;
if (!huc_fw->has_gsc_headers)
return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC);
else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0))
return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC);
else
return false;
}
/**
* intel_huc_check_status() - check HuC status
* @huc: intel_huc structure
*
* This function reads status register to verify if HuC
* firmware was successfully loaded.
*
* The return values match what is expected for the I915_PARAM_HUC_STATUS
* getparam.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
struct intel_uc_fw *huc_fw = &huc->fw;
switch (__intel_uc_fw_status(huc_fw)) {
case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
return -ENODEV;
case INTEL_UC_FIRMWARE_DISABLED:
return -EOPNOTSUPP;
case INTEL_UC_FIRMWARE_MISSING:
return -ENOPKG;
case INTEL_UC_FIRMWARE_ERROR:
return -ENOEXEC;
case INTEL_UC_FIRMWARE_INIT_FAIL:
return -ENOMEM;
case INTEL_UC_FIRMWARE_LOAD_FAIL:
return -EIO;
default:
break;
}
/*
* GSC-enabled binaries loaded via DMA are first partially
* authenticated by GuC and then fully authenticated by GSC
*/
if (huc_is_fully_authenticated(huc))
return 1; /* full auth */
else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) &&
intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC))
return 2; /* clear media only */
else
return 0;
}
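/*
 * Userspace-side sketch (illustrative, assumes a libdrm-style drmIoctl()
 * wrapper and the I915_PARAM_HUC_STATUS getparam; not part of this file):
 * the value computed above is what lands in huc_status below.
 *
 *	int huc_status = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_HUC_STATUS,
 *		.value = &huc_status,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *		use huc_status: 0 = not loaded, 1 = full auth, 2 = clear media
 */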
static bool huc_has_delayed_load(struct intel_huc *huc)
{
return intel_huc_is_loaded_by_gsc(huc) &&
(huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
}
void intel_huc_update_auth_status(struct intel_huc *huc)
{
if (!intel_uc_fw_is_loadable(&huc->fw))
return;
if (!huc->fw.has_gsc_headers)
return;
if (huc_is_fully_authenticated(huc))
intel_uc_fw_change_status(&huc->fw,
INTEL_UC_FIRMWARE_RUNNING);
else if (huc_has_delayed_load(huc))
huc_delayed_load_start(huc);
}
/**
* intel_huc_load_status - dump information about HuC load status
* @huc: the HuC
* @p: the &drm_printer
*
* Pretty printer for HuC load status.
*/
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
struct intel_gt *gt = huc_to_gt(huc);
intel_wakeref_t wakeref;
if (!intel_huc_is_supported(huc)) {
drm_printf(p, "HuC not supported\n");
return;
}
if (!intel_huc_is_wanted(huc)) {
drm_printf(p, "HuC disabled\n");
return;
}
intel_uc_fw_dump(&huc->fw, p);
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
drm_printf(p, "HuC status: 0x%08x\n",
intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg));
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_huc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/debugfs.h>
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
#include "intel_guc_debugfs.h"
#include "intel_gsc_uc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
#include "intel_uc_debugfs.h"
static int uc_usage_show(struct seq_file *m, void *data)
{
struct intel_uc *uc = m->private;
struct drm_printer p = drm_seq_file_printer(m);
drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
str_yes_no(intel_uc_supports_guc(uc)),
str_yes_no(intel_uc_wants_guc(uc)),
str_yes_no(intel_uc_uses_guc(uc)));
drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
str_yes_no(intel_uc_supports_huc(uc)),
str_yes_no(intel_uc_wants_huc(uc)),
str_yes_no(intel_uc_uses_huc(uc)));
drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
str_yes_no(intel_uc_supports_guc_submission(uc)),
str_yes_no(intel_uc_wants_guc_submission(uc)),
str_yes_no(intel_uc_uses_guc_submission(uc)));
return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(uc_usage);
void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "usage", &uc_usage_fops, NULL },
};
struct dentry *root;
if (!gt_root)
return;
/* GuC and HuC go always in pair, no need to check both */
if (!intel_uc_supports_guc(uc))
return;
root = debugfs_create_dir("uc", gt_root);
if (IS_ERR(root))
return;
uc->guc.dbgfs_node = root;
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);
intel_gsc_uc_debugfs_register(&uc->gsc, root);
intel_guc_debugfs_register(&uc->guc, root);
intel_huc_debugfs_register(&uc->huc, root);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
#include "gt/intel_engine_heartbeat.h"
#include "gem/selftests/mock_context.h"
#define BEAT_INTERVAL 100
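/* Submit an empty kernel request on @engine and return it with a reference held */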
static struct i915_request *nop_request(struct intel_engine_cs *engine)
{
struct i915_request *rq;
rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return rq;
i915_request_get(rq);
i915_request_add(rq);
return rq;
}
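/*
* Reset the GuC while a spinner is running, then rely on the heartbeat to
* trigger a full GPU reset and check that the hung request completes and
* the reset is recorded.
*/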
static int intel_hang_guc(void *arg)
{
struct intel_gt *gt = arg;
int ret = 0;
struct i915_gem_context *ctx;
struct intel_context *ce;
struct igt_spinner spin;
struct i915_request *rq;
intel_wakeref_t wakeref;
struct i915_gpu_error *global = >->i915->gpu_error;
struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt);
unsigned int reset_count;
u32 guc_status;
u32 old_beat;
if (!engine)
return 0;
ctx = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx)) {
gt_err(gt, "Failed get kernel context: %pe\n", ctx);
return PTR_ERR(ctx);
}
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
gt_err(gt, "Failed to create spinner request: %pe\n", ce);
goto err;
}
reset_count = i915_reset_count(global);
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err;
}
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
gt_err(gt, "Failed to create spinner request: %pe\n", rq);
goto err_spin;
}
ret = request_add_spin(rq, &spin);
if (ret) {
i915_request_put(rq);
gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_spin;
}
ret = intel_reset_guc(gt);
if (ret) {
i915_request_put(rq);
gt_err(gt, "Failed to reset GuC: %pe\n", ERR_PTR(ret));
goto err_spin;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
i915_request_put(rq);
gt_err(gt, "Failed to reset GuC: status = 0x%08X\n", guc_status);
ret = -EIO;
goto err_spin;
}
/* Wait for the heartbeat to cause a reset */
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
goto err_spin;
}
if (i915_reset_count(global) == reset_count) {
gt_err(gt, "Failed to record a GPU reset\n");
ret = -EINVAL;
goto err_spin;
}
err_spin:
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
intel_engine_set_heartbeat(engine, old_beat);
if (ret == 0) {
rq = nop_request(engine);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto err;
}
ret = intel_selftest_wait_for_rq(rq);
i915_request_put(rq);
if (ret) {
gt_err(gt, "No-op failed to complete: %pe\n", ERR_PTR(ret));
goto err;
}
}
err:
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
kernel_context_close(ctx);
return ret;
}
int intel_guc_hang_check(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_hang_guc),
};
struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
if (!intel_uc_uses_guc_submission(>->uc))
return 0;
return intel_gt_live_subtests(tests, gt);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016-2019 Intel Corporation
*/
#include <linux/circ_buf.h>
#include <linux/ktime.h>
#include <linux/time64.h>
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>
#include "i915_drv.h"
#include "intel_guc_ct.h"
#include "intel_guc_print.h"
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
enum {
CT_DEAD_ALIVE = 0,
CT_DEAD_SETUP,
CT_DEAD_WRITE,
CT_DEAD_DEADLOCK,
CT_DEAD_H2G_HAS_ROOM,
CT_DEAD_READ,
CT_DEAD_PROCESS_FAILED,
};
static void ct_dead_ct_worker_func(struct work_struct *w);
#define CT_DEAD(ct, reason) \
do { \
if (!(ct)->dead_ct_reported) { \
(ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
} \
} while (0)
#else
#define CT_DEAD(ct, reason) do { } while (0)
#endif
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
return container_of(ct, struct intel_guc, ct);
}
#define CT_ERROR(_ct, _fmt, ...) \
guc_err(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
guc_dbg(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...) do { } while (0)
#endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
guc_probe_error(ct_to_guc(ct), "CT: " _fmt, ##__VA_ARGS__)
/**
* DOC: CTB Blob
*
* We allocate single blob to hold both CTB descriptors and buffers:
*
* +--------+-----------------------------------------------+------+
* | offset | contents | size |
* +========+===============================================+======+
* | 0x0000 | H2G `CTB Descriptor`_ (send) | |
* +--------+-----------------------------------------------+ 4K |
* | 0x0800 | G2H `CTB Descriptor`_ (recv) | |
* +--------+-----------------------------------------------+------+
* | 0x1000 | H2G `CT Buffer`_ (send) | n*4K |
* | | | |
* +--------+-----------------------------------------------+------+
* | 0x1000 | G2H `CT Buffer`_ (recv) | m*4K |
* | + n*4K | | |
* +--------+-----------------------------------------------+------+
*
* Size of each `CT Buffer`_ must be multiple of 4K.
* We don't expect too many messages in flight at any time, unless we are
* using GuC submission. In that case each request requires a minimum of
* 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
* is enough space to avoid backpressure on the driver. We increase the size
* of the receive buffer (relative to the send) to ensure a G2H response
* CTB has a landing spot.
*/
#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
#define CTB_G2H_BUFFER_SIZE (4 * CTB_H2G_BUFFER_SIZE)
#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 4)
struct ct_request {
struct list_head link;
u32 fence;
u32 status;
u32 response_len;
u32 *response_buf;
};
struct ct_incoming_msg {
struct list_head link;
u32 size;
u32 msg[];
};
enum { CTB_SEND = 0, CTB_RECV = 1 };
enum { CTB_OWNER_HOST = 0 };
static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);
/**
* intel_guc_ct_init_early - Initialize CT state without requiring device access
* @ct: pointer to CT struct
*/
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
spin_lock_init(&ct->ctbs.send.lock);
spin_lock_init(&ct->ctbs.recv.lock);
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
INIT_WORK(&ct->dead_ct_worker, ct_dead_ct_worker_func);
#endif
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
init_waitqueue_head(&ct->wq);
}
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
{
memset(desc, 0, sizeof(*desc));
}
static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
{
u32 space;
ctb->broken = false;
ctb->tail = 0;
ctb->head = 0;
space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
atomic_set(&ctb->space, space);
guc_ct_buffer_desc_init(ctb->desc);
}
static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
struct guc_ct_buffer_desc *desc,
u32 *cmds, u32 size_in_bytes, u32 resv_space)
{
GEM_BUG_ON(size_in_bytes % 4);
ctb->desc = desc;
ctb->cmds = cmds;
ctb->size = size_in_bytes / 4;
ctb->resv_space = resv_space / 4;
guc_ct_buffer_reset(ctb);
}
static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
{
u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
};
int ret;
GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);
/* CT control must go over MMIO */
ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
return ret > 0 ? -EPROTO : ret;
}
static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
{
int err;
err = guc_action_control_ctb(ct_to_guc(ct), enable ?
GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
if (unlikely(err))
CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
str_enable_disable(enable), ERR_PTR(err));
return err;
}
static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
u32 desc_addr, u32 buff_addr, u32 size)
{
int err;
err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
desc_addr);
if (unlikely(err))
goto failed;
err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
buff_addr);
if (unlikely(err))
goto failed;
err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
size);
if (unlikely(err))
failed:
CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
send ? "SEND" : "RECV", ERR_PTR(err));
return err;
}
/**
* intel_guc_ct_init - Init buffer-based communication
* @ct: pointer to CT struct
*
* Allocate memory required for buffer-based communication.
*
* Return: 0 on success, a negative errno code on failure.
*/
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
struct guc_ct_buffer_desc *desc;
u32 blob_size;
u32 cmds_size;
u32 resv_space;
void *blob;
u32 *cmds;
int err;
err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
if (err)
return err;
GEM_BUG_ON(ct->vma);
blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
if (unlikely(err)) {
CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
blob_size, ERR_PTR(err));
return err;
}
CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);
/* store pointers to desc and cmds for send ctb */
desc = blob;
cmds = blob + 2 * CTB_DESC_SIZE;
cmds_size = CTB_H2G_BUFFER_SIZE;
resv_space = 0;
CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
resv_space);
guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
/* store pointers to desc and cmds for recv ctb */
desc = blob + CTB_DESC_SIZE;
cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
cmds_size = CTB_G2H_BUFFER_SIZE;
resv_space = G2H_ROOM_BUFFER_SIZE;
CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
resv_space);
guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
return 0;
}
/**
* intel_guc_ct_fini - Fini buffer-based communication
* @ct: pointer to CT struct
*
* Deallocate memory required for buffer-based communication.
*/
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
GEM_BUG_ON(ct->enabled);
tasklet_kill(&ct->receive_tasklet);
i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
memset(ct, 0, sizeof(*ct));
}
/**
* intel_guc_ct_enable - Enable buffer based command transport.
* @ct: pointer to CT struct
*
* Return: 0 on success, a negative errno code on failure.
*/
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
u32 base, desc, cmds, size;
void *blob;
int err;
GEM_BUG_ON(ct->enabled);
/* vma should already be allocated and mapped */
GEM_BUG_ON(!ct->vma);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
base = intel_guc_ggtt_offset(guc, ct->vma);
/* blob should start with send descriptor */
blob = __px_vaddr(ct->vma->obj);
GEM_BUG_ON(blob != ct->ctbs.send.desc);
/* (re)initialize descriptors */
guc_ct_buffer_reset(&ct->ctbs.send);
guc_ct_buffer_reset(&ct->ctbs.recv);
/*
* Register both CT buffers starting with RECV buffer.
* Descriptors are in first half of the blob.
*/
desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
size = ct->ctbs.recv.size * 4;
err = ct_register_buffer(ct, false, desc, cmds, size);
if (unlikely(err))
goto err_out;
desc = base + ptrdiff(ct->ctbs.send.desc, blob);
cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
size = ct->ctbs.send.size * 4;
err = ct_register_buffer(ct, true, desc, cmds, size);
if (unlikely(err))
goto err_out;
err = ct_control_enable(ct, true);
if (unlikely(err))
goto err_out;
ct->enabled = true;
ct->stall_time = KTIME_MAX;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
ct->dead_ct_reported = false;
ct->dead_ct_reason = CT_DEAD_ALIVE;
#endif
return 0;
err_out:
CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
CT_DEAD(ct, SETUP);
return err;
}
/**
* intel_guc_ct_disable - Disable buffer based command transport.
* @ct: pointer to CT struct
*/
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
GEM_BUG_ON(!ct->enabled);
ct->enabled = false;
if (intel_guc_is_fw_running(guc)) {
ct_control_enable(ct, false);
}
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static void ct_track_lost_and_found(struct intel_guc_ct *ct, u32 fence, u32 action)
{
unsigned int lost = fence % ARRAY_SIZE(ct->requests.lost_and_found);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
unsigned long entries[SZ_32];
unsigned int n;
n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
/* May be called under spinlock, so avoid sleeping */
ct->requests.lost_and_found[lost].stack = stack_depot_save(entries, n, GFP_NOWAIT);
#endif
ct->requests.lost_and_found[lost].fence = fence;
ct->requests.lost_and_found[lost].action = action;
}
#endif
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
return ++ct->requests.last_fence;
}
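/*
* Write one H2G message (CT header + HXG header + action payload) into the
* send CTB at the current tail and publish the new tail to the descriptor.
*/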
static int ct_write(struct intel_guc_ct *ct,
const u32 *action,
u32 len /* in dwords */,
u32 fence, u32 flags)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
struct guc_ct_buffer_desc *desc = ctb->desc;
u32 tail = ctb->tail;
u32 size = ctb->size;
u32 header;
u32 hxg;
u32 type;
u32 *cmds = ctb->cmds;
unsigned int i;
if (unlikely(desc->status))
goto corrupted;
GEM_BUG_ON(tail > size);
#ifdef CONFIG_DRM_I915_DEBUG_GUC
if (unlikely(tail != READ_ONCE(desc->tail))) {
CT_ERROR(ct, "Tail was modified %u != %u\n",
desc->tail, tail);
desc->status |= GUC_CTB_STATUS_MISMATCH;
goto corrupted;
}
if (unlikely(READ_ONCE(desc->head) >= size)) {
CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
desc->head, size);
desc->status |= GUC_CTB_STATUS_OVERFLOW;
goto corrupted;
}
#endif
/*
* dw0: CT header (including fence)
* dw1: HXG header (including action code)
* dw2+: action data
*/
header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_FAST_REQUEST :
GUC_HXG_TYPE_REQUEST;
hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION |
GUC_HXG_REQUEST_MSG_0_DATA0, action[0]);
CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
cmds[tail] = header;
tail = (tail + 1) % size;
cmds[tail] = hxg;
tail = (tail + 1) % size;
for (i = 1; i < len; i++) {
cmds[tail] = action[i];
tail = (tail + 1) % size;
}
GEM_BUG_ON(tail > size);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
ct_track_lost_and_found(ct, fence,
FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
#endif
/*
* make sure H2G buffer update and LRC tail update (if this is triggering a
* submission) are visible before updating the descriptor tail
*/
intel_guc_write_barrier(ct_to_guc(ct));
/* update local copies */
ctb->tail = tail;
GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
/* now update descriptor */
WRITE_ONCE(desc->tail, tail);
return 0;
corrupted:
CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
desc->head, desc->tail, desc->status);
CT_DEAD(ct, WRITE);
ctb->broken = true;
return -EPIPE;
}
/**
* wait_for_ct_request_update - Wait for CT request state update.
* @ct: pointer to CT
* @req: pointer to pending request
* @status: placeholder for status
*
* For each sent request, GuC shall send back CT response message.
* Our message handler will update status of tracked request once
* response message with given fence is received. Wait here and
* check for valid response status value.
*
* Return:
* * 0 response received (status is valid)
* * -ETIMEDOUT no response within hardcoded timeout
*/
static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
{
int err;
bool ct_enabled;
/*
* Fast commands should complete in less than 10us, so sample quickly
* up to that length of time, then switch to a slower sleep-wait loop.
* No GuC command should ever take longer than 10ms but many GuC
* commands can be in flight at a time, so use a 1s timeout on the slower
* sleep-wait loop.
*/
#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
#define done \
(!(ct_enabled = intel_guc_ct_enabled(ct)) || \
FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
GUC_HXG_ORIGIN_GUC)
err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
if (err)
err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done
if (!ct_enabled)
err = -ENODEV;
*status = req->status;
return err;
}
#define GUC_CTB_TIMEOUT_MS 1500
static inline bool ct_deadlocked(struct intel_guc_ct *ct)
{
long timeout = GUC_CTB_TIMEOUT_MS;
bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
if (unlikely(ret)) {
struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
ktime_ms_delta(ktime_get(), ct->stall_time),
send->status, recv->status);
CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
atomic_read(&ct->ctbs.send.space) * 4);
CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
atomic_read(&ct->ctbs.recv.space) * 4);
CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
CT_DEAD(ct, DEADLOCK);
ct->ctbs.send.broken = true;
}
return ret;
}
static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
/*
* We leave a certain amount of space in the G2H CTB buffer for
* unexpected G2H CTBs (e.g. logging, engine hang, etc...)
*/
return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
}
static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
lockdep_assert_held(&ct->ctbs.send.lock);
GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
if (g2h_len_dw)
atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
}
static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
}
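/*
* Check for @len_dw dwords of free space in the H2G CTB, refreshing the
* cached space from the descriptor head if the cached value is too small.
*/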
static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
struct guc_ct_buffer_desc *desc = ctb->desc;
u32 head;
u32 space;
if (atomic_read(&ctb->space) >= len_dw)
return true;
head = READ_ONCE(desc->head);
if (unlikely(head > ctb->size)) {
CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
head, ctb->size);
desc->status |= GUC_CTB_STATUS_OVERFLOW;
ctb->broken = true;
CT_DEAD(ct, H2G_HAS_ROOM);
return false;
}
space = CIRC_SPACE(ctb->tail, head, ctb->size);
atomic_set(&ctb->space, space);
return space >= len_dw;
}
static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
{
bool h2g = h2g_has_room(ct, h2g_dw);
bool g2h = g2h_has_room(ct, g2h_dw);
lockdep_assert_held(&ct->ctbs.send.lock);
if (unlikely(!h2g || !g2h)) {
if (ct->stall_time == KTIME_MAX)
ct->stall_time = ktime_get();
/* Be paranoid and kick G2H tasklet to free credits */
if (!g2h)
tasklet_hi_schedule(&ct->receive_tasklet);
if (unlikely(ct_deadlocked(ct)))
return -EPIPE;
else
return -EBUSY;
}
ct->stall_time = KTIME_MAX;
return 0;
}
#define G2H_LEN_DW(f) ({ \
typeof(f) f_ = (f); \
FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
GUC_CTB_HXG_MSG_MIN_LEN : 0; \
})
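/*
* Non-blocking send: fails with -EBUSY (or -EPIPE on deadlock) if there is
* not enough H2G space or not enough G2H credits for the expected response.
*/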
static int ct_send_nb(struct intel_guc_ct *ct,
const u32 *action,
u32 len,
u32 flags)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
unsigned long spin_flags;
u32 g2h_len_dw = G2H_LEN_DW(flags);
u32 fence;
int ret;
spin_lock_irqsave(&ctb->lock, spin_flags);
ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
if (unlikely(ret))
goto out;
fence = ct_get_next_fence(ct);
ret = ct_write(ct, action, len, fence, flags);
if (unlikely(ret))
goto out;
g2h_reserve_space(ct, g2h_len_dw);
intel_guc_notify(ct_to_guc(ct));
out:
spin_unlock_irqrestore(&ctb->lock, spin_flags);
return ret;
}
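/*
* Blocking send: wait for CTB space, track the request by fence and wait
* for the matching response (or retry) from the GuC.
*/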
static int ct_send(struct intel_guc_ct *ct,
const u32 *action,
u32 len,
u32 *response_buf,
u32 response_buf_size,
u32 *status)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
struct ct_request request;
unsigned long flags;
unsigned int sleep_period_ms = 1;
bool send_again;
u32 fence;
int err;
GEM_BUG_ON(!ct->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len > GUC_CTB_HXG_MSG_MAX_LEN - GUC_CTB_HDR_LEN);
GEM_BUG_ON(!response_buf && response_buf_size);
might_sleep();
resend:
send_again = false;
/*
* We use a lazy spin wait loop here as we believe that if the CT
* buffers are sized correctly the flow control condition should be
* rare. We reserve the maximum size in the G2H credits as we don't know
* how big the response is going to be.
*/
retry:
spin_lock_irqsave(&ctb->lock, flags);
if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
!g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
if (ct->stall_time == KTIME_MAX)
ct->stall_time = ktime_get();
spin_unlock_irqrestore(&ctb->lock, flags);
if (unlikely(ct_deadlocked(ct)))
return -EPIPE;
if (msleep_interruptible(sleep_period_ms))
return -EINTR;
sleep_period_ms = sleep_period_ms << 1;
goto retry;
}
ct->stall_time = KTIME_MAX;
fence = ct_get_next_fence(ct);
request.fence = fence;
request.status = 0;
request.response_len = response_buf_size;
request.response_buf = response_buf;
spin_lock(&ct->requests.lock);
list_add_tail(&request.link, &ct->requests.pending);
spin_unlock(&ct->requests.lock);
err = ct_write(ct, action, len, fence, 0);
g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
spin_unlock_irqrestore(&ctb->lock, flags);
if (unlikely(err))
goto unlink;
intel_guc_notify(ct_to_guc(ct));
err = wait_for_ct_request_update(ct, &request, status);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
if (unlikely(err)) {
if (err == -ENODEV)
/*
* wait_for_ct_request_update returns -ENODEV on reset/suspend in
* progress. In this case, output is debug rather than error info.
*/
CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
action[0], request.fence);
else
CT_ERROR(ct, "No response for request %#x (fence %u)\n",
action[0], request.fence);
goto unlink;
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
send_again = true;
goto unlink;
}
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
err = -EIO;
goto unlink;
}
if (response_buf) {
/* There shall be no data in the status */
WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
/* Return actual response len */
err = request.response_len;
} else {
/* There shall be no response payload */
WARN_ON(request.response_len);
/* Return data decoded from the status dword */
err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
}
unlink:
spin_lock_irqsave(&ct->requests.lock, flags);
list_del(&request.link);
spin_unlock_irqrestore(&ct->requests.lock, flags);
if (unlikely(send_again))
goto resend;
return err;
}
/*
* Command Transport (CT) buffer based GuC send function.
*/
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size, u32 flags)
{
u32 status = ~0; /* undefined */
int ret;
if (unlikely(!ct->enabled)) {
struct intel_guc *guc = ct_to_guc(ct);
struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
return -ENODEV;
}
if (unlikely(ct->ctbs.send.broken))
return -EPIPE;
if (flags & INTEL_GUC_CT_SEND_NB)
return ct_send_nb(ct, action, len, flags);
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
if (ret != -ENODEV)
CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
action[0], ERR_PTR(ret), status);
} else if (unlikely(ret)) {
CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
action[0], ret, ret);
}
return ret;
}
static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
{
struct ct_incoming_msg *msg;
msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
if (msg)
msg->size = num_dwords;
return msg;
}
static void ct_free_msg(struct ct_incoming_msg *msg)
{
kfree(msg);
}
/*
* Return: number of remaining dwords available to read (0 if empty)
* or a negative error code on failure
*/
static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
{
struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
struct guc_ct_buffer_desc *desc = ctb->desc;
u32 head = ctb->head;
u32 tail = READ_ONCE(desc->tail);
u32 size = ctb->size;
u32 *cmds = ctb->cmds;
s32 available;
unsigned int len;
unsigned int i;
u32 header;
if (unlikely(ctb->broken))
return -EPIPE;
if (unlikely(desc->status)) {
u32 status = desc->status;
if (status & GUC_CTB_STATUS_UNUSED) {
/*
* Potentially valid if a CLIENT_RESET request resulted in
* contexts/engines being reset. But should never happen as
* no contexts should be active when CLIENT_RESET is sent.
*/
CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
status &= ~GUC_CTB_STATUS_UNUSED;
}
if (status)
goto corrupted;
}
GEM_BUG_ON(head > size);
#ifdef CONFIG_DRM_I915_DEBUG_GUC
if (unlikely(head != READ_ONCE(desc->head))) {
CT_ERROR(ct, "Head was modified %u != %u\n",
desc->head, head);
desc->status |= GUC_CTB_STATUS_MISMATCH;
goto corrupted;
}
#endif
if (unlikely(tail >= size)) {
CT_ERROR(ct, "Invalid tail offset %u >= %u)\n",
tail, size);
desc->status |= GUC_CTB_STATUS_OVERFLOW;
goto corrupted;
}
/* tail == head condition indicates empty */
available = tail - head;
if (unlikely(available == 0)) {
*msg = NULL;
return 0;
}
/* beware of buffer wrap case */
if (unlikely(available < 0))
available += size;
CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
GEM_BUG_ON(available < 0);
header = cmds[head];
head = (head + 1) % size;
/* message len with header */
len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
if (unlikely(len > (u32)available)) {
CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
4, &header,
4 * (head + available - 1 > size ?
size - head : available - 1), &cmds[head],
4 * (head + available - 1 > size ?
available - 1 - size + head : 0), &cmds[0]);
desc->status |= GUC_CTB_STATUS_UNDERFLOW;
goto corrupted;
}
*msg = ct_alloc_msg(len);
if (!*msg) {
CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
4, &header,
4 * (head + available - 1 > size ?
size - head : available - 1), &cmds[head],
4 * (head + available - 1 > size ?
available - 1 - size + head : 0), &cmds[0]);
return available;
}
(*msg)->msg[0] = header;
for (i = 1; i < len; i++) {
(*msg)->msg[i] = cmds[head];
head = (head + 1) % size;
}
CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
/* update local copies */
ctb->head = head;
/* now update descriptor */
WRITE_ONCE(desc->head, head);
intel_guc_write_barrier(ct_to_guc(ct));
return available - len;
corrupted:
CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
desc->head, desc->tail, desc->status);
ctb->broken = true;
CT_DEAD(ct, READ);
return -EPIPE;
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static bool ct_check_lost_and_found(struct intel_guc_ct *ct, u32 fence)
{
unsigned int n;
char *buf = NULL;
bool found = false;
lockdep_assert_held(&ct->requests.lock);
for (n = 0; n < ARRAY_SIZE(ct->requests.lost_and_found); n++) {
if (ct->requests.lost_and_found[n].fence != fence)
continue;
found = true;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
buf = kmalloc(SZ_4K, GFP_NOWAIT);
if (buf && stack_depot_snprint(ct->requests.lost_and_found[n].stack,
buf, SZ_4K, 0)) {
CT_ERROR(ct, "Fence %u was used by action %#04x sent at\n%s",
fence, ct->requests.lost_and_found[n].action, buf);
break;
}
#endif
CT_ERROR(ct, "Fence %u was used by action %#04x\n",
fence, ct->requests.lost_and_found[n].action);
break;
}
kfree(buf);
return found;
}
#else
static bool ct_check_lost_and_found(struct intel_guc_ct *ct, u32 fence)
{
return false;
}
#endif
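/*
* Match an incoming response to a pending request by fence, copy out any
* response payload and update the tracked request status.
*/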
static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
{
u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
struct ct_request *req;
unsigned long flags;
bool found = false;
int err = 0;
GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
spin_lock_irqsave(&ct->requests.lock, flags);
list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
CT_DEBUG(ct, "request %u awaits response\n",
req->fence);
continue;
}
if (unlikely(datalen > req->response_len)) {
CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
req->fence, datalen, req->response_len);
datalen = min(datalen, req->response_len);
err = -EMSGSIZE;
}
if (datalen)
memcpy(req->response_buf, data, 4 * datalen);
req->response_len = datalen;
WRITE_ONCE(req->status, hxg[0]);
found = true;
break;
}
if (!found) {
CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n",
len, hxg[0], fence, ct->requests.last_fence);
if (!ct_check_lost_and_found(ct, fence)) {
list_for_each_entry(req, &ct->requests.pending, link)
CT_ERROR(ct, "request %u awaits response\n",
req->fence);
}
err = -ENOKEY;
}
spin_unlock_irqrestore(&ct->requests.lock, flags);
if (unlikely(err))
return err;
ct_free_msg(response);
return 0;
}
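/* Dispatch a G2H message to the matching GuC handler based on its action code */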
static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
struct intel_guc *guc = ct_to_guc(ct);
const u32 *hxg;
const u32 *payload;
u32 hxg_len, action, len;
int ret;
hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
payload = &hxg[GUC_HXG_MSG_MIN_LEN];
action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
len = hxg_len - GUC_HXG_MSG_MIN_LEN;
CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
break;
case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
ret = intel_guc_deregister_done_process_msg(guc, payload,
len);
break;
case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
ret = intel_guc_sched_done_process_msg(guc, payload, len);
break;
case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
ret = intel_guc_context_reset_process_msg(guc, payload, len);
break;
case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
ret = intel_guc_error_capture_process_msg(guc, payload, len);
if (unlikely(ret))
CT_ERROR(ct, "error capture notification failed %x %*ph\n",
action, 4 * len, payload);
break;
case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
ret = intel_guc_engine_failure_process_msg(guc, payload, len);
break;
case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
intel_guc_log_handle_flush_event(&guc->log);
ret = 0;
break;
case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
CT_ERROR(ct, "Received GuC crash dump notification!\n");
ret = 0;
break;
case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
CT_ERROR(ct, "Received GuC exception notification!\n");
ret = 0;
break;
default:
ret = -EOPNOTSUPP;
break;
}
if (unlikely(ret)) {
CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
action, ERR_PTR(ret));
return ret;
}
ct_free_msg(request);
return 0;
}
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
unsigned long flags;
struct ct_incoming_msg *request;
bool done;
int err;
spin_lock_irqsave(&ct->requests.lock, flags);
request = list_first_entry_or_null(&ct->requests.incoming,
struct ct_incoming_msg, link);
if (request)
list_del(&request->link);
done = !!list_empty(&ct->requests.incoming);
spin_unlock_irqrestore(&ct->requests.lock, flags);
if (!request)
return true;
err = ct_process_request(ct, request);
if (unlikely(err)) {
CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
ERR_PTR(err), 4 * request->size, request->msg);
CT_DEAD(ct, PROCESS_FAILED);
ct_free_msg(request);
}
return done;
}
static void ct_incoming_request_worker_func(struct work_struct *w)
{
struct intel_guc_ct *ct =
container_of(w, struct intel_guc_ct, requests.worker);
bool done;
do {
done = ct_process_incoming_requests(ct);
} while (!done);
}
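/*
* Queue an incoming G2H event for the worker, releasing G2H credits here in
* IRQ context for the actions that consume reserved space.
*/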
static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
unsigned long flags;
GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
/*
* Adjusting the space must be done in IRQ context or a deadlock can occur,
* as the CTB processing in the workqueue below can itself send CTBs, which
* creates a circular dependency if the space were returned there.
*/
switch (action) {
case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
g2h_release_space(ct, request->size);
}
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
spin_unlock_irqrestore(&ct->requests.lock, flags);
queue_work(system_unbound_wq, &ct->requests.worker);
return 0;
}
static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
{
u32 origin, type;
u32 *hxg;
int err;
if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
return -EBADMSG;
hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];
origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
err = -EPROTO;
goto failed;
}
type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
switch (type) {
case GUC_HXG_TYPE_EVENT:
err = ct_handle_event(ct, msg);
break;
case GUC_HXG_TYPE_RESPONSE_SUCCESS:
case GUC_HXG_TYPE_RESPONSE_FAILURE:
case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
err = ct_handle_response(ct, msg);
break;
default:
err = -EOPNOTSUPP;
}
if (unlikely(err)) {
failed:
CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
}
return err;
}
static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
{
u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
int err;
if (format == GUC_CTB_FORMAT_HXG)
err = ct_handle_hxg(ct, msg);
else
err = -EOPNOTSUPP;
if (unlikely(err)) {
CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
ERR_PTR(err), 4 * msg->size, msg->msg);
ct_free_msg(msg);
}
}
/*
* Return: number of remaining dwords available to read (0 if empty)
* or a negative error code on failure
*/
static int ct_receive(struct intel_guc_ct *ct)
{
struct ct_incoming_msg *msg = NULL;
unsigned long flags;
int ret;
spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
ret = ct_read(ct, &msg);
spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
if (ret < 0)
return ret;
if (msg)
ct_handle_msg(ct, msg);
return ret;
}
static void ct_try_receive_message(struct intel_guc_ct *ct)
{
int ret;
if (GEM_WARN_ON(!ct->enabled))
return;
ret = ct_receive(ct);
if (ret > 0)
tasklet_hi_schedule(&ct->receive_tasklet);
}
static void ct_receive_tasklet_func(struct tasklet_struct *t)
{
struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
ct_try_receive_message(ct);
}
/*
* When we're communicating with the GuC over CT, GuC uses events
* to notify us about new messages being posted on the RECV buffer.
*/
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
if (unlikely(!ct->enabled)) {
WARN(1, "Unexpected GuC event received while CT disabled!\n");
return;
}
ct_try_receive_message(ct);
}
void intel_guc_ct_print_info(struct intel_guc_ct *ct,
struct drm_printer *p)
{
drm_printf(p, "CT %s\n", str_enabled_disabled(ct->enabled));
if (!ct->enabled)
return;
drm_printf(p, "H2G Space: %u\n",
atomic_read(&ct->ctbs.send.space) * 4);
drm_printf(p, "Head: %u\n",
ct->ctbs.send.desc->head);
drm_printf(p, "Tail: %u\n",
ct->ctbs.send.desc->tail);
drm_printf(p, "G2H Space: %u\n",
atomic_read(&ct->ctbs.recv.space) * 4);
drm_printf(p, "Head: %u\n",
ct->ctbs.recv.desc->head);
drm_printf(p, "Tail: %u\n",
ct->ctbs.recv.desc->tail);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
static void ct_dead_ct_worker_func(struct work_struct *w)
{
struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, dead_ct_worker);
struct intel_guc *guc = ct_to_guc(ct);
if (ct->dead_ct_reported)
return;
ct->dead_ct_reported = true;
guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
intel_klog_error_capture(guc_to_gt(guc), (intel_engine_mask_t)~0U);
}
#endif
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/fs.h>
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
#include "intel_guc.h"
#include "intel_guc_log.h"
#include "intel_guc_log_debugfs.h"
#include "intel_uc.h"
static u32 obj_to_guc_log_dump_size(struct drm_i915_gem_object *obj)
{
u32 size;
if (!obj)
return PAGE_SIZE;
/* "0x%08x 0x%08x 0x%08x 0x%08x\n" => 16 bytes -> 44 chars => x2.75 */
size = ((obj->base.size * 11) + 3) / 4;
/* Add padding for final blank line, any extra header info, etc. */
size = PAGE_ALIGN(size + PAGE_SIZE);
return size;
}
static u32 guc_log_dump_size(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
if (!intel_guc_is_supported(guc))
return PAGE_SIZE;
if (!log->vma)
return PAGE_SIZE;
return obj_to_guc_log_dump_size(log->vma->obj);
}
static int guc_log_dump_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
int ret;
ret = intel_guc_log_dump(m->private, &p, false);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
pr_warn_once("preallocated size:%zx for %s exceeded\n",
m->size, __func__);
return ret;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_log_dump, guc_log_dump_size);
static u32 guc_load_err_dump_size(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
if (!intel_guc_is_supported(guc))
return PAGE_SIZE;
return obj_to_guc_log_dump_size(uc->load_err_log);
}
static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && seq_has_overflowed(m))
pr_warn_once("preallocated size:%zx for %s exceeded\n",
m->size, __func__);
return intel_guc_log_dump(m->private, &p, true);
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE_WITH_SIZE(guc_load_err_log_dump, guc_load_err_dump_size);
static int guc_log_level_get(void *data, u64 *val)
{
struct intel_guc_log *log = data;
if (!log->vma)
return -ENODEV;
*val = intel_guc_log_get_level(log);
return 0;
}
static int guc_log_level_set(void *data, u64 val)
{
struct intel_guc_log *log = data;
if (!log->vma)
return -ENODEV;
return intel_guc_log_set_level(log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(guc_log_level_fops,
guc_log_level_get, guc_log_level_set,
"%lld\n");
static int guc_log_relay_open(struct inode *inode, struct file *file)
{
struct intel_guc_log *log = inode->i_private;
if (!intel_guc_is_ready(log_to_guc(log)))
return -ENODEV;
file->private_data = log;
return intel_guc_log_relay_open(log);
}
static ssize_t
guc_log_relay_write(struct file *filp,
const char __user *ubuf,
size_t cnt,
loff_t *ppos)
{
struct intel_guc_log *log = filp->private_data;
int val;
int ret;
ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
if (ret < 0)
return ret;
/*
* Enable and start the guc log relay on value of 1.
* Flush log relay for any other value.
*/
if (val == 1)
ret = intel_guc_log_relay_start(log);
else
intel_guc_log_relay_flush(log);
return ret ?: cnt;
}
static int guc_log_relay_release(struct inode *inode, struct file *file)
{
struct intel_guc_log *log = inode->i_private;
intel_guc_log_relay_close(log);
return 0;
}
static const struct file_operations guc_log_relay_fops = {
.owner = THIS_MODULE,
.open = guc_log_relay_open,
.write = guc_log_relay_write,
.release = guc_log_relay_release,
};
void intel_guc_log_debugfs_register(struct intel_guc_log *log,
struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "guc_log_dump", &guc_log_dump_fops, NULL },
{ "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
{ "guc_log_level", &guc_log_level_fops, NULL },
{ "guc_log_relay", &guc_log_relay_fops, NULL },
};
if (!intel_guc_is_supported(log_to_guc(log)))
return;
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), log);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "gt/intel_gt_print.h"
#include "selftests/igt_spinner.h"
#include "selftests/igt_reset.h"
#include "selftests/intel_scheduler_helpers.h"
#include "gt/intel_engine_heartbeat.h"
#include "gem/selftests/mock_context.h"
static void logical_sort(struct intel_engine_cs **engines, int num_engines)
{
struct intel_engine_cs *sorted[MAX_ENGINE_INSTANCE + 1];
int i, j;
for (i = 0; i < num_engines; ++i)
for (j = 0; j < MAX_ENGINE_INSTANCE + 1; ++j) {
if (engines[j]->logical_mask & BIT(i)) {
sorted[i] = engines[j];
break;
}
}
memcpy(*engines, *sorted,
sizeof(struct intel_engine_cs *) * num_engines);
}
static struct intel_context *
multi_lrc_create_parent(struct intel_gt *gt, u8 class,
unsigned long flags)
{
struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
struct intel_engine_cs *engine;
enum intel_engine_id id;
int i = 0;
for_each_engine(engine, gt, id) {
if (engine->class != class)
continue;
siblings[i++] = engine;
}
if (i <= 1)
return ERR_PTR(0);
logical_sort(siblings, i);
return intel_engine_create_parallel(siblings, 1, i);
}
static void multi_lrc_context_unpin(struct intel_context *ce)
{
struct intel_context *child;
GEM_BUG_ON(!intel_context_is_parent(ce));
for_each_child(ce, child)
intel_context_unpin(child);
intel_context_unpin(ce);
}
static void multi_lrc_context_put(struct intel_context *ce)
{
GEM_BUG_ON(!intel_context_is_parent(ce));
/*
* Only the parent gets the creation ref put in the uAPI, the parent
* itself is responsible for creation ref put on the children.
*/
intel_context_put(ce);
}
static struct i915_request *
multi_lrc_nop_request(struct intel_context *ce)
{
struct intel_context *child;
struct i915_request *rq, *child_rq;
int i = 0;
GEM_BUG_ON(!intel_context_is_parent(ce));
rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
i915_request_get(rq);
i915_request_add(rq);
for_each_child(ce, child) {
child_rq = intel_context_create_request(child);
if (IS_ERR(child_rq))
goto child_error;
if (++i == ce->parallel.number_children)
set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
&child_rq->fence.flags);
i915_request_add(child_rq);
}
return rq;
child_error:
i915_request_put(rq);
return ERR_PTR(-ENOMEM);
}
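/*
* Create a parallel (multi-LRC) context for @class, submit a nop request on
* the parent and each child, then wait for completion and for the GT to idle.
*/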
static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
{
struct intel_context *parent;
struct i915_request *rq;
int ret;
parent = multi_lrc_create_parent(gt, class, 0);
if (IS_ERR(parent)) {
gt_err(gt, "Failed creating contexts: %pe\n", parent);
return PTR_ERR(parent);
} else if (!parent) {
gt_dbg(gt, "Not enough engines in class: %d\n", class);
return 0;
}
rq = multi_lrc_nop_request(parent);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
gt_err(gt, "Failed creating requests: %pe\n", rq);
goto out;
}
ret = intel_selftest_wait_for_rq(rq);
if (ret)
gt_err(gt, "Failed waiting on request: %pe\n", ERR_PTR(ret));
i915_request_put(rq);
if (ret >= 0) {
ret = intel_gt_wait_for_idle(gt, HZ * 5);
if (ret < 0)
gt_err(gt, "GT failed to idle: %pe\n", ERR_PTR(ret));
}
out:
multi_lrc_context_unpin(parent);
multi_lrc_context_put(parent);
return ret;
}
static int intel_guc_multi_lrc_basic(void *arg)
{
struct intel_gt *gt = arg;
unsigned int class;
int ret;
for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
/* We don't support breadcrumb handshake on these classes */
if (class == COMPUTE_CLASS || class == RENDER_CLASS)
continue;
ret = __intel_guc_multi_lrc_basic(gt, class);
if (ret)
return ret;
}
return 0;
}
int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_multi_lrc_basic),
};
struct intel_gt *gt = to_gt(i915);
if (intel_gt_is_wedged(gt))
return 0;
if (!intel_uc_uses_guc_submission(>->uc))
return 0;
return intel_gt_live_subtests(tests, gt);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2014-2019 Intel Corporation
*
* Authors:
* Vinit Azad <[email protected]>
* Ben Widawsky <[email protected]>
* Dave Gordon <[email protected]>
* Alex Dai <[email protected]>
*/
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
#include "intel_guc_fw.h"
#include "intel_guc_print.h"
#include "i915_drv.h"
static void guc_prepare_xfer(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_MIA_CACHING;
/* Must program this register before loading the ucode with DMA */
intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
if (IS_GEN9_LP(uncore->i915))
intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
if (GRAPHICS_VER(uncore->i915) == 9) {
/* DOP Clock Gating Enable for GuC clocks */
intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 0,
GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
/* allows for 5us (in 10ns units) before GT can go to RC6 */
intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
}
}
static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
struct intel_uncore *uncore)
{
u32 rsa[UOS_RSA_SCRATCH_COUNT];
size_t copied;
int i;
copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
if (copied < sizeof(rsa))
return -ENOMEM;
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
return 0;
}
static int guc_xfer_rsa_vma(struct intel_uc_fw *guc_fw,
struct intel_uncore *uncore)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
intel_uncore_write(uncore, UOS_RSA_SCRATCH(0),
intel_guc_ggtt_offset(guc, guc_fw->rsa_data));
return 0;
}
/* Copy RSA signature from the fw image to HW for verification */
static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
struct intel_uncore *uncore)
{
if (guc_fw->rsa_data)
return guc_xfer_rsa_vma(guc_fw, uncore);
else
return guc_xfer_rsa_mmio(guc_fw, uncore);
}
/*
* Read the GuC status register (GUC_STATUS) and store it in the
* specified location; then return a boolean indicating whether
* the value matches either completion or a known failure code.
*
* This is used for polling the GuC status in a wait_for()
* loop below.
*/
static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool *success)
{
u32 val = intel_uncore_read(uncore, GUC_STATUS);
u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, val);
u32 br_val = REG_FIELD_GET(GS_BOOTROM_MASK, val);
*status = val;
switch (uk_val) {
case INTEL_GUC_LOAD_STATUS_READY:
*success = true;
return true;
case INTEL_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
case INTEL_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
case INTEL_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
case INTEL_GUC_LOAD_STATUS_HWCONFIG_ERROR:
case INTEL_GUC_LOAD_STATUS_DPC_ERROR:
case INTEL_GUC_LOAD_STATUS_EXCEPTION:
case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
*success = false;
return true;
}
switch (br_val) {
case INTEL_BOOTROM_STATUS_NO_KEY_FOUND:
case INTEL_BOOTROM_STATUS_RSA_FAILED:
case INTEL_BOOTROM_STATUS_PAVPC_FAILED:
case INTEL_BOOTROM_STATUS_WOPCM_FAILED:
case INTEL_BOOTROM_STATUS_LOADLOC_FAILED:
case INTEL_BOOTROM_STATUS_JUMP_FAILED:
case INTEL_BOOTROM_STATUS_RC6CTXCONFIG_FAILED:
case INTEL_BOOTROM_STATUS_MPUMAP_INCORRECT:
case INTEL_BOOTROM_STATUS_EXCEPTION:
case INTEL_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
*success = false;
return true;
}
return false;
}
/*
* Use a longer timeout for debug builds so that problems can be detected
* and analysed. But a shorter timeout for releases so that users don't
* wait forever to find out there is a problem. Note that the only reason
* an end user should hit the timeout is in case of extreme thermal throttling.
* And a system that is that hot during boot is probably dead anyway!
*/
#if defined(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOAD_RETRY_LIMIT 20
#else
#define GUC_LOAD_RETRY_LIMIT 3
#endif
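/*
* Poll GUC_STATUS until the firmware reports ready or a known failure, then
* decode any failure into a meaningful errno.
*/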
static int guc_wait_ucode(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_uncore *uncore = gt->uncore;
ktime_t before, after, delta;
bool success;
u32 status;
int ret, count;
u64 delta_ms;
u32 before_freq;
/*
* Wait for the GuC to start up.
*
* Measurements indicate this should take no more than 20ms
* (assuming the GT clock is at maximum frequency). So, a
* timeout here indicates that the GuC has failed and is unusable.
* (Higher levels of the driver may decide to reset the GuC and
* attempt the ucode load again if this happens.)
*
* FIXME: There is a known (but exceedingly unlikely) race condition
* where the asynchronous frequency management code could reduce
* the GT clock while a GuC reload is in progress (during a full
* GT reset). A fix is in progress but there are complex locking
* issues to be resolved. In the meantime bump the timeout to
* 200ms. Even at slowest clock, this should be sufficient. And
* in the working case, a larger timeout makes no difference.
*
* IFWI updates have also been seen to cause sporadic failures due to
* the requested frequency not being granted and thus the firmware
* load is attempted at minimum frequency. That can lead to load times
* in the seconds range. However, there is a limit on how long an
* individual wait_for() can wait. So wrap it in a loop.
*/
before_freq = intel_rps_read_actual_frequency(&uncore->gt->rps);
before = ktime_get();
for (count = 0; count < GUC_LOAD_RETRY_LIMIT; count++) {
ret = wait_for(guc_load_done(uncore, &status, &success), 1000);
if (!ret || !success)
break;
guc_dbg(guc, "load still in progress, count = %d, freq = %dMHz, status = 0x%08X [0x%02X/%02X]\n",
count, intel_rps_read_actual_frequency(&uncore->gt->rps), status,
REG_FIELD_GET(GS_BOOTROM_MASK, status),
REG_FIELD_GET(GS_UKERNEL_MASK, status));
}
after = ktime_get();
delta = ktime_sub(after, before);
delta_ms = ktime_to_ms(delta);
if (ret || !success) {
u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
guc_info(guc, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz, ret = %d\n",
status, delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps), ret);
guc_info(guc, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
bootrom, ukernel,
REG_FIELD_GET(GS_MIA_MASK, status),
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
switch (bootrom) {
case INTEL_BOOTROM_STATUS_NO_KEY_FOUND:
guc_info(guc, "invalid key requested, header = 0x%08X\n",
intel_uncore_read(uncore, GUC_HEADER_INFO));
ret = -ENOEXEC;
break;
case INTEL_BOOTROM_STATUS_RSA_FAILED:
guc_info(guc, "firmware signature verification failed\n");
ret = -ENOEXEC;
break;
case INTEL_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
guc_info(guc, "firmware production part check failure\n");
ret = -ENOEXEC;
break;
}
switch (ukernel) {
case INTEL_GUC_LOAD_STATUS_EXCEPTION:
guc_info(guc, "firmware exception. EIP: %#x\n",
intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
break;
case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
guc_info(guc, "illegal register in save/restore workaround list\n");
ret = -EPERM;
break;
case INTEL_GUC_LOAD_STATUS_HWCONFIG_START:
guc_info(guc, "still extracting hwconfig table.\n");
ret = -ETIMEDOUT;
break;
}
/* Uncommon/unexpected error, see earlier status code print for details */
if (ret == 0)
ret = -ENXIO;
} else if (delta_ms > 200) {
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
intel_rps_read_actual_frequency(&uncore->gt->rps), before_freq,
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
delta_ms, intel_rps_read_actual_frequency(&uncore->gt->rps),
before_freq, status, count, ret);
}
return ret;
}
/**
* intel_guc_fw_upload() - load GuC uCode to device
* @guc: intel_guc structure
*
* Called from intel_uc_init_hw() during driver load, resume from sleep and
* after a GPU reset.
*
* The firmware image should have already been fetched into memory, so only
* check that fetch succeeded, and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
int intel_guc_fw_upload(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_uncore *uncore = gt->uncore;
int ret;
guc_prepare_xfer(gt);
/*
* Note that GuC needs the CSS header plus uKernel code to be copied
* by the DMA engine in one operation, whereas the RSA signature is
* loaded separately, either by copying it to the UOS_RSA_SCRATCH
* register (if key size <= 256) or through a ggtt-pinned vma (if key
* size > 256). The RSA size and therefore the way we provide it to the
* HW is fixed for each platform and hard-coded in the bootrom.
*/
ret = guc_xfer_rsa(&guc->fw, uncore);
if (ret)
goto out;
/*
* Current uCode expects the code to be loaded at 8k; locations below
* this are used for the stack.
*/
ret = intel_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
if (ret)
goto out;
ret = guc_wait_ucode(guc);
if (ret)
goto out;
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_RUNNING);
return 0;
out:
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <drm/drm_cache.h>
#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_guc_slpc.h"
#include "intel_guc_print.h"
#include "intel_mchbar_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
return container_of(slpc, struct intel_guc, slpc);
}
static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
return guc_to_gt(slpc_to_guc(slpc));
}
static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
{
return slpc_to_gt(slpc)->i915;
}
static bool __detect_slpc_supported(struct intel_guc *guc)
{
/* GuC SLPC is unavailable for pre-Gen12 */
return guc->submission_supported &&
GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
static bool __guc_slpc_selected(struct intel_guc *guc)
{
if (!intel_guc_slpc_is_supported(guc))
return false;
return guc->submission_selected;
}
void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
slpc->supported = __detect_slpc_supported(guc);
slpc->selected = __guc_slpc_selected(guc);
}
static void slpc_mem_set_param(struct slpc_shared_data *data,
u32 id, u32 value)
{
GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
/*
* When the flag bit is set, the corresponding value will be read
* and applied by SLPC.
*/
data->override_params.bits[id >> 5] |= (1 << (id % 32));
data->override_params.values[id] = value;
}
static void slpc_mem_set_enabled(struct slpc_shared_data *data,
u8 enable_id, u8 disable_id)
{
/*
* Enabling a param involves setting the enable_id
* to 1 and disable_id to 0.
*/
slpc_mem_set_param(data, enable_id, 1);
slpc_mem_set_param(data, disable_id, 0);
}
static void slpc_mem_set_disabled(struct slpc_shared_data *data,
u8 enable_id, u8 disable_id)
{
/*
* Disabling a param involves setting the enable_id
* to 0 and disable_id to 1.
*/
slpc_mem_set_param(data, disable_id, 1);
slpc_mem_set_param(data, enable_id, 0);
}
static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
struct slpc_shared_data *data;
GEM_BUG_ON(!slpc->vma);
drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
data = slpc->vaddr;
return data->header.global_state;
}
static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
{
u32 request[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
id,
value,
};
int ret;
ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);
return ret > 0 ? -EPROTO : ret;
}
static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
struct intel_guc *guc = slpc_to_guc(slpc);
GEM_BUG_ON(id >= SLPC_MAX_PARAM);
return guc_action_slpc_set_param_nb(guc, id, value);
}
static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
{
u32 request[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
id,
value,
};
int ret;
ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
}
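/*
 * Example (sketch only): a request to set the global min frequency to,
 * say, 300 MHz is a four-dword H2G message:
 *
 *   u32 request[] = {
 *       GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
 *       SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
 *       SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
 *       300,
 *   };
 *
 * The only difference between the blocking and _nb variants above is
 * whether the CTB send waits for the GuC response (intel_guc_send) or
 * returns without waiting (intel_guc_send_nb).
 */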
static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
u32 request[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
id,
};
return intel_guc_send(guc, request, ARRAY_SIZE(request));
}
static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
}
static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
{
u32 request[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
offset,
0,
};
int ret;
ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
}
static int slpc_query_task_state(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
ret = guc_action_slpc_query(guc, offset);
if (unlikely(ret))
guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
return ret;
}
static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
{
struct intel_guc *guc = slpc_to_guc(slpc);
int ret;
GEM_BUG_ON(id >= SLPC_MAX_PARAM);
ret = guc_action_slpc_set_param(guc, id, value);
if (ret)
guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
id, value, ERR_PTR(ret));
return ret;
}
static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
{
struct intel_guc *guc = slpc_to_guc(slpc);
GEM_BUG_ON(id >= SLPC_MAX_PARAM);
return guc_action_slpc_unset_param(guc, id);
}
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
struct intel_guc *guc = slpc_to_guc(slpc);
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
lockdep_assert_held(&slpc->lock);
if (!intel_guc_is_ready(guc))
return -ENODEV;
/*
* Unlike intel_guc_slpc_set_min_freq(), this function does not
* update the softlimit, since it is used to temporarily change
* the min freq, for example during a waitboost. The caller is
* responsible for checking bounds.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
/* Non-blocking request will avoid stalls */
ret = slpc_set_param_nb(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq);
if (ret)
guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
freq, ERR_PTR(ret));
}
return ret;
}
static void slpc_boost_work(struct work_struct *work)
{
struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
int err;
/*
* Raise min freq to boost. It's possible that
* this is greater than current max. But it will
* certainly be limited by RP0. An error setting
* the min param is not fatal.
*/
mutex_lock(&slpc->lock);
if (atomic_read(&slpc->num_waiters)) {
err = slpc_force_min_freq(slpc, slpc->boost_freq);
if (!err)
slpc->num_boosts++;
}
mutex_unlock(&slpc->lock);
}
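/*
 * Waitboost lifecycle, roughly (the increment side lives elsewhere in
 * the driver and is not shown in this file):
 *
 *   1. a waiter bumps slpc->num_waiters and queues slpc->boost_work
 *   2. slpc_boost_work() above raises the effective min frequency to
 *      slpc->boost_freq while waiters remain
 *   3. intel_guc_slpc_dec_waiters() drops the count on request retire
 *      and restores min_freq_softlimit once the last waiter is gone
 */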
int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
int err;
GEM_BUG_ON(slpc->vma);
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
if (unlikely(err)) {
guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
return err;
}
slpc->max_freq_softlimit = 0;
slpc->min_freq_softlimit = 0;
slpc->ignore_eff_freq = false;
slpc->min_is_rpmax = false;
slpc->boost_freq = 0;
atomic_set(&slpc->num_waiters, 0);
slpc->num_boosts = 0;
slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
mutex_init(&slpc->lock);
INIT_WORK(&slpc->boost_work, slpc_boost_work);
return err;
}
static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
switch (state) {
case SLPC_GLOBAL_STATE_NOT_RUNNING:
return "not running";
case SLPC_GLOBAL_STATE_INITIALIZING:
return "initializing";
case SLPC_GLOBAL_STATE_RESETTING:
return "resetting";
case SLPC_GLOBAL_STATE_RUNNING:
return "running";
case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
return "shutting down";
case SLPC_GLOBAL_STATE_ERROR:
return "error";
default:
return "unknown";
}
}
static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
return slpc_global_state_to_string(slpc_get_state(slpc));
}
static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
{
u32 request[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
SLPC_EVENT(SLPC_EVENT_RESET, 2),
offset,
0,
};
int ret;
ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
}
static int slpc_reset(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
int ret;
ret = guc_action_slpc_reset(guc, offset);
if (unlikely(ret < 0)) {
guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
return ret;
}
if (!ret) {
if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
guc_probe_error(guc, "SLPC not enabled! State = %s\n",
slpc_get_state_string(slpc));
return -EIO;
}
}
return 0;
}
static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
{
struct slpc_shared_data *data = slpc->vaddr;
GEM_BUG_ON(!slpc->vma);
return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
data->task_state_data.freq) *
GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}
static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
{
struct slpc_shared_data *data = slpc->vaddr;
GEM_BUG_ON(!slpc->vma);
return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
data->task_state_data.freq) *
GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}
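/*
 * Worked example (constants assumed for illustration): with
 * GT_FREQUENCY_MULTIPLIER == 50 and GEN9_FREQ_SCALER == 3, a raw
 * unslice frequency field of 30 decodes to
 * DIV_ROUND_CLOSEST(30 * 50, 3) == 500 MHz.
 */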
static void slpc_shared_data_reset(struct slpc_shared_data *data)
{
memset(data, 0, sizeof(struct slpc_shared_data));
data->header.size = sizeof(struct slpc_shared_data);
/* Enable only GTPERF task, disable others */
slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
SLPC_PARAM_TASK_DISABLE_GTPERF);
slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
SLPC_PARAM_TASK_DISABLE_BALANCER);
slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
SLPC_PARAM_TASK_DISABLE_DCC);
}
/**
* intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
* @slpc: pointer to intel_guc_slpc.
* @val: frequency (MHz)
*
* This function will invoke GuC SLPC action to update the max frequency
* limit for unslice.
*
* Return: 0 on success, non-zero error code on failure.
*/
int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret;
if (val < slpc->min_freq ||
val > slpc->rp0_freq ||
val < slpc->min_freq_softlimit)
return -EINVAL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
val);
/* Return standardized err code for sysfs calls */
if (ret)
ret = -EIO;
}
if (!ret)
slpc->max_freq_softlimit = val;
return ret;
}
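/*
 * Usage sketch (hypothetical caller, e.g. a sysfs store handler):
 *
 *   err = intel_guc_slpc_set_max_freq(slpc, 1100);
 *   if (err)
 *       return err;
 *
 * err is -EINVAL for an out-of-range request and -EIO for any GuC-level
 * failure, since GuC errors are folded into a single standardized code
 * for sysfs consumers.
 */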
/**
* intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
* @slpc: pointer to intel_guc_slpc.
* @val: pointer to val which will hold max frequency (MHz)
*
* This function will invoke GuC SLPC action to read the max frequency
* limit for unslice.
*
* Return: 0 on success, non-zero error code on failure.
*/
int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
/* Force GuC to update task data */
ret = slpc_query_task_state(slpc);
if (!ret)
*val = slpc_decode_max_freq(slpc);
}
return ret;
}
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret;
mutex_lock(&slpc->lock);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ret = slpc_set_param(slpc,
SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
val);
if (ret) {
guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
val, ERR_PTR(ret));
} else {
slpc->ignore_eff_freq = val;
/* Set min to RPn when we disable efficient freq */
if (val)
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
slpc->min_freq);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
return ret;
}
/**
* intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
* @slpc: pointer to intel_guc_slpc.
* @val: frequency (MHz)
*
* This function will invoke GuC SLPC action to update the min unslice
* frequency.
*
* Return: 0 on success, non-zero error code on failure.
*/
int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret;
if (val < slpc->min_freq ||
val > slpc->rp0_freq ||
val > slpc->max_freq_softlimit)
return -EINVAL;
/* Need a lock now since waitboost can be modifying min as well */
mutex_lock(&slpc->lock);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
val);
if (!ret)
slpc->min_freq_softlimit = val;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
/* Return standardized err code for sysfs calls */
if (ret)
ret = -EIO;
return ret;
}
/**
* intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
* @slpc: pointer to intel_guc_slpc.
* @val: pointer to val which will hold min frequency (MHz)
*
* This function will invoke GuC SLPC action to read the min frequency
* limit for unslice.
*
* Return: 0 on success, non-zero error code on failure.
*/
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
/* Force GuC to update task data */
ret = slpc_query_task_state(slpc);
if (!ret)
*val = slpc_decode_min_freq(slpc);
}
return ret;
}
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
if (!HAS_MEDIA_RATIO_MODE(i915))
return -ENODEV;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
ret = slpc_set_param(slpc,
SLPC_PARAM_MEDIA_FF_RATIO_MODE,
val);
return ret;
}
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
u32 pm_intrmsk_mbz = 0;
/*
* Allow GuC to receive ARAT timer expiry event.
* This interrupt register is setup by RPS code
* when host based Turbo is enabled.
*/
pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
intel_uncore_rmw(gt->uncore,
GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
}
static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
{
int ret = 0;
/*
* Softlimits are initially equivalent to platform limits
* unless they have deviated from defaults, in which case,
* we retain the values and set min/max accordingly.
*/
if (!slpc->max_freq_softlimit) {
slpc->max_freq_softlimit = slpc->rp0_freq;
slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
} else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
ret = intel_guc_slpc_set_max_freq(slpc,
slpc->max_freq_softlimit);
}
if (unlikely(ret))
return ret;
if (!slpc->min_freq_softlimit) {
/* Min softlimit is initialized to RPn */
slpc->min_freq_softlimit = slpc->min_freq;
slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
} else {
return intel_guc_slpc_set_min_freq(slpc,
slpc->min_freq_softlimit);
}
return 0;
}
static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
{
int slpc_min_freq;
int ret;
ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
if (ret) {
guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
return false;
}
if (slpc_min_freq == SLPC_MAX_FREQ_MHZ)
return true;
else
return false;
}
static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
{
/*
* For server parts, SLPC min will be at RPMax.
* Use min softlimit to clamp it to RP0 instead.
*/
if (!slpc->min_freq_softlimit &&
is_slpc_min_freq_rpmax(slpc)) {
slpc->min_is_rpmax = true;
slpc->min_freq_softlimit = slpc->rp0_freq;
(slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
}
}
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
/* Force SLPC to use platform rp0 */
return slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
slpc->rp0_freq);
}
static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
struct intel_rps_freq_caps caps;
gen6_rps_get_freq_caps(rps, &caps);
slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
if (!slpc->boost_freq)
slpc->boost_freq = slpc->rp0_freq;
}
/**
* intel_guc_slpc_override_gucrc_mode() - override GUCRC mode
* @slpc: pointer to intel_guc_slpc.
* @mode: new value of the mode.
*
* This function will override the GUCRC mode.
*
* Return: 0 on success, non-zero error code on failure.
*/
int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
{
int ret;
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
if (mode >= SLPC_GUCRC_MODE_MAX)
return -EINVAL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
if (ret)
guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
mode, ERR_PTR(ret));
}
return ret;
}
int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
intel_wakeref_t wakeref;
int ret = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
if (ret)
guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
}
return ret;
}
/**
* intel_guc_slpc_enable() - Start SLPC
* @slpc: pointer to intel_guc_slpc.
*
* SLPC is enabled by setting up the shared data structure and
* sending reset event to GuC SLPC. Initial data is setup in
* intel_guc_slpc_init. Here we send the reset event. We do
* not currently need a slpc_disable since this is taken care
* of automatically when a reset/suspend occurs and the GuC
* CTB is destroyed.
*
* Return: 0 on success, non-zero error code on failure.
*/
int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
{
struct intel_guc *guc = slpc_to_guc(slpc);
int ret;
GEM_BUG_ON(!slpc->vma);
slpc_shared_data_reset(slpc->vaddr);
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
return ret;
}
ret = slpc_query_task_state(slpc);
if (unlikely(ret < 0))
return ret;
intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
slpc_get_rp_values(slpc);
/* Handle the case where min=max=RPmax */
update_server_min_softlimit(slpc);
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
return ret;
}
/* Set cached value of ignore efficient freq */
intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
return ret;
}
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
return 0;
}
int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
{
int ret = 0;
if (val < slpc->min_freq || val > slpc->rp0_freq)
return -EINVAL;
mutex_lock(&slpc->lock);
if (slpc->boost_freq != val) {
/* Apply only if there are active waiters */
if (atomic_read(&slpc->num_waiters)) {
ret = slpc_force_min_freq(slpc, val);
if (ret) {
ret = -EIO;
goto done;
}
}
slpc->boost_freq = val;
}
done:
mutex_unlock(&slpc->lock);
return ret;
}
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
{
/*
* Return min back to the softlimit.
* This is called during request retire,
* so we don't need to fail that if the
* set_param fails.
*/
mutex_lock(&slpc->lock);
if (atomic_dec_and_test(&slpc->num_waiters))
slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
mutex_unlock(&slpc->lock);
}
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
struct slpc_shared_data *data = slpc->vaddr;
struct slpc_task_state_data *slpc_tasks;
intel_wakeref_t wakeref;
int ret = 0;
GEM_BUG_ON(!slpc->vma);
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
ret = slpc_query_task_state(slpc);
if (!ret) {
slpc_tasks = &data->task_state_data;
drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
drm_printf(p, "\tGTPERF task active: %s\n",
str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
drm_printf(p, "\tMax freq: %u MHz\n",
slpc_decode_max_freq(slpc));
drm_printf(p, "\tMin freq: %u MHz\n",
slpc_decode_min_freq(slpc));
drm_printf(p, "\twaitboosts: %u\n",
slpc->num_boosts);
drm_printf(p, "\tBoosts outstanding: %u\n",
atomic_read(&slpc->num_waiters));
}
}
return ret;
}
void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
{
if (!slpc->vma)
return;
i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021-2022 Intel Corporation
*/
#include <linux/types.h>
#include <drm/drm_print.h>
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_lrc.h"
#include "guc_capture_fwif.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
#include "intel_guc_print.h"
#include "i915_drv.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_reg.h"
/*
* Define all device tables of GuC error capture register lists
* NOTE: For engine-registers, GuC only needs the register offsets
* from the engine-mmio-base
*/
#define COMMON_BASE_GLOBAL \
{ FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
#define COMMON_GEN8BASE_GLOBAL \
{ ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
{ DONE_REG, 0, 0, "DONE_REG" }, \
{ HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
#define GEN8_GLOBAL \
{ GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
{ GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }
#define COMMON_GEN12BASE_GLOBAL \
{ GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
{ GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
{ GEN12_AUX_ERR_DBG, 0, 0, "AUX_ERR_DBG" }, \
{ GEN12_GAM_DONE, 0, 0, "GAM_DONE" }, \
{ GEN12_RING_FAULT_REG, 0, 0, "FAULT_REG" }
#define COMMON_BASE_ENGINE_INSTANCE \
{ RING_PSMI_CTL(0), 0, 0, "RC PSMI" }, \
{ RING_ESR(0), 0, 0, "ESR" }, \
{ RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
{ RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
{ RING_IPEIR(0), 0, 0, "IPEIR" }, \
{ RING_IPEHR(0), 0, 0, "IPEHR" }, \
{ RING_INSTPS(0), 0, 0, "INSTPS" }, \
{ RING_BBADDR(0), 0, 0, "RING_BBADDR_LOW32" }, \
{ RING_BBADDR_UDW(0), 0, 0, "RING_BBADDR_UP32" }, \
{ RING_BBSTATE(0), 0, 0, "BB_STATE" }, \
{ CCID(0), 0, 0, "CCID" }, \
{ RING_ACTHD(0), 0, 0, "ACTHD_LDW" }, \
{ RING_ACTHD_UDW(0), 0, 0, "ACTHD_UDW" }, \
{ RING_INSTPM(0), 0, 0, "INSTPM" }, \
{ RING_INSTDONE(0), 0, 0, "INSTDONE" }, \
{ RING_NOPID(0), 0, 0, "RING_NOPID" }, \
{ RING_START(0), 0, 0, "START" }, \
{ RING_HEAD(0), 0, 0, "HEAD" }, \
{ RING_TAIL(0), 0, 0, "TAIL" }, \
{ RING_CTL(0), 0, 0, "CTL" }, \
{ RING_MI_MODE(0), 0, 0, "MODE" }, \
{ RING_CONTEXT_CONTROL(0), 0, 0, "RING_CONTEXT_CONTROL" }, \
{ RING_HWS_PGA(0), 0, 0, "HWS" }, \
{ RING_MODE_GEN7(0), 0, 0, "GFX_MODE" }, \
{ GEN8_RING_PDP_LDW(0, 0), 0, 0, "PDP0_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 0), 0, 0, "PDP0_UDW" }, \
{ GEN8_RING_PDP_LDW(0, 1), 0, 0, "PDP1_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 1), 0, 0, "PDP1_UDW" }, \
{ GEN8_RING_PDP_LDW(0, 2), 0, 0, "PDP2_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 2), 0, 0, "PDP2_UDW" }, \
{ GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
#define COMMON_BASE_HAS_EU \
{ EIR, 0, 0, "EIR" }
#define COMMON_BASE_RENDER \
{ GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
#define COMMON_GEN12BASE_RENDER \
{ GEN12_SC_INSTDONE_EXTRA, 0, 0, "GEN12_SC_INSTDONE_EXTRA" }, \
{ GEN12_SC_INSTDONE_EXTRA2, 0, 0, "GEN12_SC_INSTDONE_EXTRA2" }
#define COMMON_GEN12BASE_VEC \
{ GEN12_SFC_DONE(0), 0, 0, "SFC_DONE[0]" }, \
{ GEN12_SFC_DONE(1), 0, 0, "SFC_DONE[1]" }, \
{ GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \
{ GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" }
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
COMMON_BASE_GLOBAL,
COMMON_GEN8BASE_GLOBAL,
COMMON_GEN12BASE_GLOBAL,
};
/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
COMMON_GEN12BASE_RENDER,
};
/* GEN8+ Render / Compute Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_rc_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
/* GEN8+ Media Decode/Encode Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_vd_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
/* XE_LP Video Enhancement Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_vec_class_regs[] = {
COMMON_GEN12BASE_VEC,
};
/* GEN8+ Video Enhancement Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_vec_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
/* GEN8+ Blitter Per-Engine-Instance */
static const struct __guc_mmio_reg_descr gen8_blt_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
/* XE_LP - GSC Per-Engine-Instance */
static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
/* GEN8 - Global */
static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
COMMON_BASE_GLOBAL,
COMMON_GEN8BASE_GLOBAL,
GEN8_GLOBAL,
};
static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
};
/*
* Empty list to prevent warnings about unknown class/instance types
* as not all class/instance types have entries on all platforms.
*/
static const struct __guc_mmio_reg_descr empty_regs_list[] = {
};
#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x)
#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
{ \
regslist, \
ARRAY_SIZE(regslist), \
TO_GCAP_DEF_OWNER(regsowner), \
TO_GCAP_DEF_TYPE(regstype), \
class, \
NULL, \
}
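/*
 * Illustrative expansion: MAKE_REGLIST(gen8_global_regs, PF, GLOBAL, 0)
 * produces a group descriptor roughly equivalent to (member names as
 * used elsewhere in this file):
 *
 *   {
 *       .list = gen8_global_regs,
 *       .num_regs = ARRAY_SIZE(gen8_global_regs),
 *       .owner = GUC_CAPTURE_LIST_INDEX_PF,
 *       .type = GUC_CAPTURE_LIST_TYPE_GLOBAL,
 *       .engine = 0,
 *       .extlist = NULL,
 *   }
 */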
/* List of lists */
static const struct __guc_mmio_reg_descr_group gen8_lists[] = {
MAKE_REGLIST(gen8_global_regs, PF, GLOBAL, 0),
MAKE_REGLIST(gen8_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
{}
};
static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = {
MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
MAKE_REGLIST(xe_lp_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
MAKE_REGLIST(gen8_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
MAKE_REGLIST(gen8_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
MAKE_REGLIST(xe_lp_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
MAKE_REGLIST(gen8_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
MAKE_REGLIST(gen8_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
{}
};
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
u32 owner, u32 type, u32 id)
{
int i;
if (!reglists)
return NULL;
for (i = 0; reglists[i].list; ++i) {
if (reglists[i].owner == owner && reglists[i].type == type &&
(reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
return &reglists[i];
}
return NULL;
}
static struct __guc_mmio_reg_descr_group *
guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
u32 owner, u32 type, u32 id)
{
int i;
if (!reglists)
return NULL;
for (i = 0; reglists[i].extlist; ++i) {
if (reglists[i].owner == owner && reglists[i].type == type &&
(reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
return &reglists[i];
}
return NULL;
}
static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
{
int i = 0;
if (!reglists)
return;
while (reglists[i].extlist)
kfree(reglists[i++].extlist);
}
struct __ext_steer_reg {
const char *name;
i915_mcr_reg_t reg;
};
static const struct __ext_steer_reg gen8_extregs[] = {
{"GEN8_SAMPLER_INSTDONE", GEN8_SAMPLER_INSTDONE},
{"GEN8_ROW_INSTDONE", GEN8_ROW_INSTDONE}
};
static const struct __ext_steer_reg xehpg_extregs[] = {
{"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
};
static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
const struct __ext_steer_reg *extlist,
int slice_id, int subslice_id)
{
ext->reg = _MMIO(i915_mmio_reg_offset(extlist->reg));
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
ext->regname = extlist->name;
}
static int
__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
{
struct __guc_mmio_reg_descr *list;
list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
if (!list)
return -ENOMEM;
newlist->extlist = list;
newlist->num_regs = num_regs;
newlist->owner = rootlist->owner;
newlist->engine = rootlist->engine;
newlist->type = rootlist->type;
return 0;
}
static void
guc_capture_alloc_steered_lists(struct intel_guc *guc,
const struct __guc_mmio_reg_descr_group *lists)
{
struct intel_gt *gt = guc_to_gt(guc);
int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
const struct __guc_mmio_reg_descr_group *list;
struct __guc_mmio_reg_descr_group *extlists;
struct __guc_mmio_reg_descr *extarray;
bool has_xehpg_extregs;
/* steered registers currently only exist for the render-class */
list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE);
/* skip if extlists was previously allocated */
if (!list || guc->capture->extlists)
return;
has_xehpg_extregs = GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55);
num_steer_regs = ARRAY_SIZE(gen8_extregs);
if (has_xehpg_extregs)
num_steer_regs += ARRAY_SIZE(xehpg_extregs);
for_each_ss_steering(iter, gt, slice, subslice)
num_tot_regs += num_steer_regs;
if (!num_tot_regs)
return;
/* allocate an extra for an end marker */
extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
if (!extlists)
return;
if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
kfree(extlists);
return;
}
extarray = extlists[0].extlist;
for_each_ss_steering(iter, gt, slice, subslice) {
for (i = 0; i < ARRAY_SIZE(gen8_extregs); ++i) {
__fill_ext_reg(extarray, &gen8_extregs[i], slice, subslice);
++extarray;
}
if (has_xehpg_extregs) {
for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
__fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
++extarray;
}
}
}
guc_dbg(guc, "capture found %d ext-regs.\n", num_tot_regs);
guc->capture->extlists = extlists;
}
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
const struct __guc_mmio_reg_descr_group *lists;
if (GRAPHICS_VER(i915) >= 12)
lists = xe_lp_lists;
else
lists = gen8_lists;
/*
* For certain engine classes, there are slice and subslice
* level registers requiring steering. We allocate and populate
* these at init time based on hw config and add them as an extension
* list at the end of the pre-populated render list.
*/
guc_capture_alloc_steered_lists(guc, lists);
return lists;
}
static const char *
__stringify_type(u32 type)
{
switch (type) {
case GUC_CAPTURE_LIST_TYPE_GLOBAL:
return "Global";
case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
return "Class";
case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
return "Instance";
default:
break;
}
return "unknown";
}
static const char *
__stringify_engclass(u32 class)
{
switch (class) {
case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
return "Render/Compute";
case GUC_CAPTURE_LIST_CLASS_VIDEO:
return "Video";
case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
return "VideoEnhance";
case GUC_CAPTURE_LIST_CLASS_BLITTER:
return "Blitter";
case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
return "GSC-Other";
default:
break;
}
return "unknown";
}
static int
guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
{
u32 i = 0, j = 0;
const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
const struct __guc_mmio_reg_descr_group *match;
struct __guc_mmio_reg_descr_group *matchext;
if (!reglists)
return -ENODEV;
match = guc_capture_get_one_list(reglists, owner, type, classid);
if (!match)
return -ENODATA;
for (i = 0; i < num_entries && i < match->num_regs; ++i) {
ptr[i].offset = match->list[i].reg.reg;
ptr[i].value = 0xDEADF00D;
ptr[i].flags = match->list[i].flags;
ptr[i].mask = match->list[i].mask;
}
matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
if (matchext) {
for (i = match->num_regs, j = 0; i < num_entries &&
i < (match->num_regs + matchext->num_regs) &&
j < matchext->num_regs; ++i, ++j) {
ptr[i].offset = matchext->extlist[j].reg.reg;
ptr[i].value = 0xDEADF00D;
ptr[i].flags = matchext->extlist[j].flags;
ptr[i].mask = matchext->extlist[j].mask;
}
}
if (i < num_entries)
guc_dbg(guc, "Got short capture reglist init: %d out %d.\n", i, num_entries);
return 0;
}
static int
guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid)
{
const struct __guc_mmio_reg_descr_group *match;
struct __guc_mmio_reg_descr_group *matchext;
int num_regs;
match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
if (!match)
return 0;
num_regs = match->num_regs;
matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
if (matchext)
num_regs += matchext->num_regs;
return num_regs;
}
static int
guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size, bool is_purpose_est)
{
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
if (!gc->reglists) {
guc_warn(guc, "No capture reglist for this device\n");
return -ENODEV;
}
if (cache->is_valid) {
*size = cache->size;
return cache->status;
}
if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
!guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
guc_warn(guc, "Missing capture reglist: global!\n");
else
guc_warn(guc, "Missing capture reglist: %s(%u):%s(%u)!\n",
__stringify_type(type), type,
__stringify_engclass(classid), classid);
return -ENODATA;
}
num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
/* intentionally empty lists can exist depending on hw config */
if (!num_regs)
return -ENODATA;
if (size)
*size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
(num_regs * sizeof(struct guc_mmio_reg)));
return 0;
}
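/*
 * Sizing sketch (numbers assumed for illustration): each guc_mmio_reg
 * entry carries four u32s (offset, value, flags, mask), so a class
 * list of ~80 registers needs roughly 80 * 16 bytes plus the small
 * guc_debug_capture_list header, which PAGE_ALIGN() rounds up to a
 * single page (4K with 4K pages) for the ADS.
 */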
int
intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
size_t *size)
{
return guc_capture_getlistsize(guc, owner, type, classid, size, false);
}
static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
int
intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
void **outptr)
{
struct intel_guc_state_capture *gc = guc->capture;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
struct guc_debug_capture_list *listnode;
int ret, num_regs;
u8 *caplist, *tmp;
size_t size = 0;
if (!gc->reglists)
return -ENODEV;
if (cache->is_valid) {
*outptr = cache->ptr;
return cache->status;
}
/*
* ADS population of input registers is a good
* time to pre-allocate cachelist output nodes
*/
guc_capture_create_prealloc_nodes(guc);
ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size);
if (ret) {
cache->is_valid = true;
cache->ptr = NULL;
cache->size = 0;
cache->status = ret;
return ret;
}
caplist = kzalloc(size, GFP_KERNEL);
if (!caplist) {
guc_dbg(guc, "Failed to alloc cached register capture list");
return -ENOMEM;
}
/* populate capture list header */
tmp = caplist;
num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid);
listnode = (struct guc_debug_capture_list *)tmp;
listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
/* populate list of register descriptor */
tmp += sizeof(struct guc_debug_capture_list);
guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs);
/* cache this list */
cache->is_valid = true;
cache->ptr = caplist;
cache->size = size;
cache->status = 0;
*outptr = caplist;
return 0;
}
int
intel_guc_capture_getnullheader(struct intel_guc *guc,
void **outptr, size_t *size)
{
struct intel_guc_state_capture *gc = guc->capture;
int tmp = sizeof(u32) * 4;
void *null_header;
if (gc->ads_null_cache) {
*outptr = gc->ads_null_cache;
*size = tmp;
return 0;
}
null_header = kzalloc(tmp, GFP_KERNEL);
if (!null_header) {
guc_dbg(guc, "Failed to alloc cached register capture null list");
return -ENOMEM;
}
gc->ads_null_cache = null_header;
*outptr = null_header;
*size = tmp;
return 0;
}
static int
guc_capture_output_min_size_est(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int worst_min_size = 0;
size_t tmp = 0;
if (!guc->capture)
return -ENODEV;
/*
* If every single engine-instance suffered a failure in quick succession but
* were all unrelated, then a burst of multiple error-capture events would dump
* registers for every one engine instance, one at a time. In this case, GuC
* would even dump the global-registers repeatedly.
*
* For each engine instance, there would be 1 x guc_state_capture_group_t output
* followed by 3 x guc_state_capture_t lists. The latter is how the register
* dumps are split across different register types (where the '3' are global vs class
* vs instance).
*/
for_each_engine(engine, gt, id) {
worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
(3 * sizeof(struct guc_state_capture_header_t));
if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
worst_min_size += tmp;
if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
engine->class, &tmp, true)) {
worst_min_size += tmp;
}
if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
engine->class, &tmp, true)) {
worst_min_size += tmp;
}
}
return worst_min_size;
}
/*
* Add on a 3x multiplier to allow for multiple back-to-back captures occurring
* before the i915 can read the data out and process it
*/
#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
static void check_guc_capture_size(struct intel_guc *guc)
{
int min_size = guc_capture_output_min_size_est(guc);
int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
/*
* NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB)
* Additionally, it's based on space needed to fit all engines getting reset at once
* within the same G2H handler task slot. This is very unlikely. However, if GuC really
* does run out of space for whatever reason, we will see a separate warning message
* when processing the G2H event capture-notification, search for:
* INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
*/
if (min_size < 0)
guc_warn(guc, "Failed to calculate error state capture buffer minimum size: %d!\n",
min_size);
else if (min_size > buffer_size)
guc_warn(guc, "Error state capture buffer maybe small: %d < %d\n",
buffer_size, min_size);
else if (spare_size > buffer_size)
guc_dbg(guc, "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
buffer_size, spare_size, min_size);
}
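/*
 * Example of the check above (illustrative numbers): with an estimated
 * worst-case min_size of ~80K and the 3x overbuffer multiplier,
 * spare_size is ~240K; a 1MB capture region therefore passes both
 * checks, whereas anything below min_size triggers a warning.
 */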
/*
* KMD Init time flows:
* --------------------
* --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
* intel_guc_ads acquires the register lists by calling
* intel_guc_capture_list_size and intel_guc_capture_list_get 'n' times,
* where n = 1 for global-reg-list +
* num_engine_classes for class-reg-list +
* num_engine_classes for instance-reg-list
* (since all instances of the same engine-class type
* have an identical engine-instance register-list).
* ADS module also calls separately for PF vs VF.
*
* --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
* Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
* Note2: 'x 3' to hold multiple capture groups
*
* GUC Runtime notify capture:
* --------------------------
* --> G2H STATE_CAPTURE_NOTIFICATION
* L--> intel_guc_capture_process
* L--> Loop through B (head..tail) and for each engine instance's
* err-state-captured register-list we find, we alloc 'C':
* --> alloc C: A capture-output-node structure that includes misc capture info along
* with 3 register list dumps (global, engine-class and engine-instance)
* This node is created from a pre-allocated list of blank nodes in
* guc->capture->cachelist and populated with the error-capture
* data from GuC and then it's added into guc->capture->outlist linked
* list. This list is used for matchup and printout by i915_gpu_coredump
* and err_print_gt, (when user invokes the error capture sysfs).
*
* GUC --> notify context reset:
* -----------------------------
* --> G2H CONTEXT RESET
* L--> guc_handle_context_reset --> i915_capture_error_state
* L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines
* --> capture_engine(..IS_GUC_CAPTURE)
* L--> intel_guc_capture_get_matching_node is where
* detach C from internal linked list and add it into
* intel_engine_coredump struct (if the context and
* engine of the event notification matches a node
* in the link list).
*
* User Sysfs / Debugfs
* --------------------
* --> i915_gpu_coredump_copy_to_buffer->
* L--> err_print_to_sgl --> err_print_gt
* L--> error_print_guc_captures
* L--> intel_guc_capture_print_node prints the
* register lists values of the attached node
* on the error-engine-dump being reported.
* L--> i915_reset_error_state ... -->__i915_gpu_coredump_free
* L--> ... cleanup_gt -->
* L--> intel_guc_capture_free_node returns the
* capture-output-node back to the internal
* cachelist for reuse.
*
*/
static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
{
if (buf->wr >= buf->rd)
return (buf->wr - buf->rd);
return (buf->size - buf->rd) + buf->wr;
}
static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
{
if (buf->rd > buf->wr)
return (buf->size - buf->rd);
return (buf->wr - buf->rd);
}
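/*
 * Worked example for the two helpers above (illustrative values): with
 * size = 0x1000, rd = 0xF00 and wr = 0x100, the ring holds
 * (0x1000 - 0xF00) + 0x100 = 0x200 bytes in total, of which only
 * 0x1000 - 0xF00 = 0x100 bytes are contiguous before the wrap.
 */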
/*
* GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
*
* The GuC Log buffer region for error-capture is managed like a ring buffer.
* The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
* Additionally, for the current and foreseeable future, all packed error-
* capture output structures are dword aligned.
*
* That said, if the GuC firmware is in the midst of writing a structure that is larger
* than one dword but the tail end of the err-capture buffer-region has less space left,
* we would need to extract that structure one dword at a time straddled across the end,
* onto the start of the ring.
*
* Below function, guc_capture_log_remove_dw is a helper for that. All callers of this
* function would typically do a straight-up memcpy from the ring contents and will only
* call this helper if their structure-extraction is straddling across the end of the
* ring. GuC firmware does not add any padding. The reason for the no-padding is to ease
* scalability for future expansion of output data types without requiring a redesign
* of the flow controls.
*/
static int
guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
u32 *dw)
{
int tries = 2;
int avail = 0;
u32 *src_data;
if (!guc_capture_buf_cnt(buf))
return 0;
while (tries--) {
avail = guc_capture_buf_cnt_to_end(buf);
if (avail >= sizeof(u32)) {
src_data = (u32 *)(buf->data + buf->rd);
*dw = *src_data;
buf->rd += 4;
return 4;
}
if (avail)
guc_dbg(guc, "Register capture log not dword aligned, skipping.\n");
buf->rd = 0;
}
return 0;
}
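/*
 * Straddling example (illustrative): if a 16-byte guc_mmio_reg starts
 * 8 bytes before the end of the ring, guc_capture_data_extracted()
 * below declines the straight memcpy and the caller falls back to four
 * guc_capture_log_remove_dw() calls; the third call finds no whole
 * dword left before the end, resets buf->rd to 0 and continues reading
 * from the start of the ring.
 */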
static bool
guc_capture_data_extracted(struct __guc_capture_bufstate *b,
int size, void *dest)
{
if (guc_capture_buf_cnt_to_end(b) >= size) {
memcpy(dest, (b->data + b->rd), size);
b->rd += size;
return true;
}
return false;
}
static int
guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
struct guc_state_capture_group_header_t *ghdr)
{
int read = 0;
int fullsize = sizeof(struct guc_state_capture_group_header_t);
if (fullsize > guc_capture_buf_cnt(buf))
return -1;
if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr))
return 0;
read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner);
read += guc_capture_log_remove_dw(guc, buf, &ghdr->info);
if (read != fullsize)
return -1;
return 0;
}
static int
guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
struct guc_state_capture_header_t *hdr)
{
int read = 0;
int fullsize = sizeof(struct guc_state_capture_header_t);
if (fullsize > guc_capture_buf_cnt(buf))
return -1;
if (guc_capture_data_extracted(buf, fullsize, (void *)hdr))
return 0;
read += guc_capture_log_remove_dw(guc, buf, &hdr->owner);
read += guc_capture_log_remove_dw(guc, buf, &hdr->info);
read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca);
read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id);
read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios);
if (read != fullsize)
return -1;
return 0;
}
static int
guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
struct guc_mmio_reg *reg)
{
int read = 0;
int fullsize = sizeof(struct guc_mmio_reg);
if (fullsize > guc_capture_buf_cnt(buf))
return -1;
if (guc_capture_data_extracted(buf, fullsize, (void *)reg))
return 0;
read += guc_capture_log_remove_dw(guc, buf, &reg->offset);
read += guc_capture_log_remove_dw(guc, buf, &reg->value);
read += guc_capture_log_remove_dw(guc, buf, &reg->flags);
read += guc_capture_log_remove_dw(guc, buf, &reg->mask);
if (read != fullsize)
return -1;
return 0;
}
static void
guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
{
int i;
for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
kfree(node->reginfo[i].regs);
list_del(&node->link);
kfree(node);
}
static void
guc_capture_delete_prealloc_nodes(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *n, *ntmp;
/*
* NOTE: At the end of driver operation, we must assume that we
* have prealloc nodes in both the cachelist and the outlist
* if unclaimed error capture events occurred prior to shutdown.
*/
list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link)
guc_capture_delete_one_node(guc, n);
list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link)
guc_capture_delete_one_node(guc, n);
}
static void
guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
struct list_head *list)
{
list_add_tail(&node->link, list);
}
static void
guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc,
struct __guc_capture_parsed_output *node)
{
guc_capture_add_node_to_list(node, &gc->outlist);
}
static void
guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc,
struct __guc_capture_parsed_output *node)
{
guc_capture_add_node_to_list(node, &gc->cachelist);
}
static void
guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
{
struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX];
int i;
for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
tmp[i] = node->reginfo[i].regs;
memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
guc->capture->max_mmio_per_node);
}
memset(node, 0, sizeof(*node));
for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
node->reginfo[i].regs = tmp[i];
INIT_LIST_HEAD(&node->link);
}
static struct __guc_capture_parsed_output *
guc_capture_get_prealloc_node(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *found = NULL;
if (!list_empty(&guc->capture->cachelist)) {
struct __guc_capture_parsed_output *n, *ntmp;
/* get first avail node from the cache list */
list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
found = n;
list_del(&n->link);
break;
}
} else {
struct __guc_capture_parsed_output *n, *ntmp;
/* traverse down and steal back the oldest node already allocated */
list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
found = n;
}
if (found)
list_del(&found->link);
}
if (found)
guc_capture_init_node(guc, found);
return found;
}
static struct __guc_capture_parsed_output *
guc_capture_alloc_one_node(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *new;
int i;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node,
sizeof(struct guc_mmio_reg), GFP_KERNEL);
if (!new->reginfo[i].regs) {
while (i)
kfree(new->reginfo[--i].regs);
kfree(new);
return NULL;
}
}
guc_capture_init_node(guc, new);
return new;
}
static struct __guc_capture_parsed_output *
guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original,
u32 keep_reglist_mask)
{
struct __guc_capture_parsed_output *new;
int i;
new = guc_capture_get_prealloc_node(guc);
if (!new)
return NULL;
if (!original)
return new;
new->is_partial = original->is_partial;
/* copy reg-lists that we want to clone */
for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
if (keep_reglist_mask & BIT(i)) {
GEM_BUG_ON(original->reginfo[i].num_regs >
guc->capture->max_mmio_per_node);
memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
new->reginfo[i].num_regs = original->reginfo[i].num_regs;
new->reginfo[i].vfid = original->reginfo[i].vfid;
if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) {
new->eng_class = original->eng_class;
} else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
new->eng_inst = original->eng_inst;
new->guc_id = original->guc_id;
new->lrca = original->lrca;
}
}
}
return new;
}
static void
__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
struct __guc_capture_parsed_output *node = NULL;
int i;
for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
node = guc_capture_alloc_one_node(guc);
if (!node) {
guc_warn(guc, "Register capture pre-alloc-cache failure\n");
/* don't free the priors, use what we got and clean up at shutdown */
return;
}
guc_capture_add_node_to_cachelist(guc->capture, node);
}
}
static int
guc_get_max_reglist_count(struct intel_guc *guc)
{
int i, j, k, tmp, maxregcount = 0;
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0)
continue;
tmp = guc_cap_list_num_regs(guc->capture, i, j, k);
if (tmp > maxregcount)
maxregcount = tmp;
}
}
}
if (!maxregcount)
maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
return maxregcount;
}
static void
guc_capture_create_prealloc_nodes(struct intel_guc *guc)
{
/* skip if we've already done the pre-alloc */
if (guc->capture->max_mmio_per_node)
return;
guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
__guc_capture_create_prealloc_nodes(guc);
}
static int
guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
{
struct guc_state_capture_group_header_t ghdr = {0};
struct guc_state_capture_header_t hdr = {0};
struct __guc_capture_parsed_output *node = NULL;
struct guc_mmio_reg *regs = NULL;
int i, numlists, numregs, ret = 0;
enum guc_capture_type datatype;
struct guc_mmio_reg tmp;
bool is_partial = false;
i = guc_capture_buf_cnt(buf);
if (!i)
return -ENODATA;
if (i % sizeof(u32)) {
guc_warn(guc, "Got mis-aligned register capture entries\n");
ret = -EIO;
goto bailout;
}
/* first get the capture group header */
if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
ret = -EIO;
goto bailout;
}
/*
* we would typically expect a layout as below, with at least 3 captures
* (global + engine-class + engine-instance) and more than 3 when multiple
* dependent engine instances are reset together.
* ____________________________________________
* | Capture Group |
* | ________________________________________ |
* | | Capture Group Header: | |
* | | - num_captures = 5 | |
* | |______________________________________| |
* | ________________________________________ |
* | | Capture1: | |
* | | Hdr: GLOBAL, numregs=a | |
* | | ____________________________________ | |
* | | | Reglist | | |
* | | | - reg1, reg2, ... rega | | |
* | | |__________________________________| | |
* | |______________________________________| |
* | ________________________________________ |
* | | Capture2: | |
* | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
* | | ____________________________________ | |
* | | | Reglist | | |
* | | | - reg1, reg2, ... regb | | |
* | | |__________________________________| | |
* | |______________________________________| |
* | ________________________________________ |
* | | Capture3: | |
* | | Hdr: INSTANCE=RCS, numregs=c | |
* | | ____________________________________ | |
* | | | Reglist | | |
* | | | - reg1, reg2, ... regc | | |
* | | |__________________________________| | |
* | |______________________________________| |
* | ________________________________________ |
* | | Capture4: | |
* | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
* | | ____________________________________ | |
* | | | Reglist | | |
* | | | - reg1, reg2, ... regd | | |
* | | |__________________________________| | |
* | |______________________________________| |
* | ________________________________________ |
* | | Capture5: | |
* | | Hdr: INSTANCE=CCS0, numregs=e | |
* | | ____________________________________ | |
* | | | Reglist | | |
* | | | - reg1, reg2, ... rege | | |
* | | |__________________________________| | |
* | |______________________________________| |
* |__________________________________________|
*/
is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info);
numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info);
while (numlists--) {
if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
ret = -EIO;
break;
}
datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
/* unknown capture type - skip over to next capture set */
numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
while (numregs--) {
if (guc_capture_log_get_register(guc, buf, &tmp)) {
ret = -EIO;
break;
}
}
continue;
} else if (node) {
/*
* Based on the current capture type and what we have so far,
* decide if we should add the current node into the internal
* linked list for match-up when i915_gpu_coredump calls later
* (and alloc a blank node for the next set of reglists)
* or continue with the same node or clone the current node
* but only retain the global or class registers (such as the
* case of dependent engine resets).
*/
if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) {
guc_capture_add_node_to_outlist(guc->capture, node);
node = NULL;
} else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS &&
node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
/* Add to list, clone node and duplicate global list */
guc_capture_add_node_to_outlist(guc->capture, node);
node = guc_capture_clone_node(guc, node,
GCAP_PARSED_REGLIST_INDEX_GLOBAL);
} else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE &&
node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
/* Add to list, clone node and duplicate global + class lists */
guc_capture_add_node_to_outlist(guc->capture, node);
node = guc_capture_clone_node(guc, node,
(GCAP_PARSED_REGLIST_INDEX_GLOBAL |
GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
}
}
if (!node) {
node = guc_capture_get_prealloc_node(guc);
if (!node) {
ret = -ENOMEM;
break;
}
if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
guc_dbg(guc, "Register capture missing global dump: %08x!\n",
datatype);
}
node->is_partial = is_partial;
node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
switch (datatype) {
case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
node->lrca = hdr.lrca;
node->guc_id = hdr.guc_id;
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
break;
default:
break;
}
numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
if (numregs > guc->capture->max_mmio_per_node) {
guc_dbg(guc, "Register capture list extraction clipped by prealloc!\n");
numregs = guc->capture->max_mmio_per_node;
}
node->reginfo[datatype].num_regs = numregs;
regs = node->reginfo[datatype].regs;
i = 0;
while (numregs--) {
if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
ret = -EIO;
break;
}
}
}
bailout:
if (node) {
/* If we have data, add to linked list for match-up when i915_gpu_coredump calls */
for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
if (node->reginfo[i].regs) {
guc_capture_add_node_to_outlist(guc->capture, node);
node = NULL;
break;
}
}
if (node) /* else return it back to cache list */
guc_capture_add_node_to_cachelist(guc->capture, node);
}
return ret;
}
static int __guc_capture_flushlog_complete(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
GUC_CAPTURE_LOG_BUFFER
};
return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}
static void __guc_capture_process_output(struct intel_guc *guc)
{
unsigned int buffer_size, read_offset, write_offset, full_count;
struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
struct guc_log_buffer_state log_buf_state_local;
struct guc_log_buffer_state *log_buf_state;
struct __guc_capture_bufstate buf;
void *src_data = NULL;
bool new_overflow;
int ret;
log_buf_state = guc->log.buf_addr +
(sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
src_data = guc->log.buf_addr +
intel_guc_get_log_buffer_offset(&guc->log, GUC_CAPTURE_LOG_BUFFER);
/*
* Make a copy of the state structure, inside GuC log buffer
* (which is uncached mapped), on the stack to avoid reading
* from it multiple times.
*/
memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
buffer_size = intel_guc_get_log_buffer_size(&guc->log, GUC_CAPTURE_LOG_BUFFER);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_count = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file;
new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER,
full_count);
/* Now copy the actual logs. */
if (unlikely(new_overflow)) {
/* copy the whole buffer in case of overflow */
read_offset = 0;
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
guc_err(guc, "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
read_offset, buffer_size);
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
}
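/*
* At this point [read_offset, write_offset) is the window of the capture
* section that still needs parsing; on overflow or bogus pointers the
* whole section is treated as unread data instead.
*/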
buf.size = buffer_size;
buf.rd = read_offset;
buf.wr = write_offset;
buf.data = src_data;
if (!uc->reset_in_progress) {
do {
ret = guc_capture_extract_reglists(guc, &buf);
} while (ret >= 0);
}
/* Update the err-cap state of the log buffer */
log_buf_state->read_ptr = write_offset;
log_buf_state->flush_to_file = 0;
__guc_capture_flushlog_complete(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static const char *
guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
u32 class, u32 id, u32 offset, u32 *is_ext)
{
const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
const struct __guc_mmio_reg_descr_group *match;
struct __guc_mmio_reg_descr_group *matchext;
int j;
*is_ext = 0;
if (!reglists)
return NULL;
match = guc_capture_get_one_list(reglists, owner, type, id);
if (!match)
return NULL;
for (j = 0; j < match->num_regs; ++j) {
if (offset == match->list[j].reg.reg)
return match->list[j].regname;
}
if (extlists) {
matchext = guc_capture_get_one_ext_list(extlists, owner, type, id);
if (!matchext)
return NULL;
for (j = 0; j < matchext->num_regs; ++j) {
if (offset == matchext->extlist[j].reg.reg) {
*is_ext = 1;
return matchext->extlist[j].regname;
}
}
}
return NULL;
}
#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
do { \
i915_error_printf(ebuf, " i915-Eng-Name: %s command stream\n", \
(eng)->name); \
i915_error_printf(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
i915_error_printf(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
i915_error_printf(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
(eng)->logical_mask); \
} while (0)
#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
do { \
i915_error_printf(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
(node)->eng_inst); \
i915_error_printf(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
i915_error_printf(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
} while (0)
int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
const struct intel_engine_coredump *ee)
{
const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
"full-capture",
"partial-capture"
};
const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = {
"Global",
"Engine-Class",
"Engine-Instance"
};
struct intel_guc_state_capture *cap;
struct __guc_capture_parsed_output *node;
struct intel_engine_cs *eng;
struct guc_mmio_reg *regs;
struct intel_guc *guc;
const char *str;
int numregs, i, j;
u32 is_ext;
if (!ebuf || !ee)
return -EINVAL;
cap = ee->guc_capture;
if (!cap || !ee->engine)
return -ENODEV;
guc = &ee->engine->gt->uc.guc;
i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
ee->engine->name);
node = ee->guc_capture_node;
if (!node) {
i915_error_printf(ebuf, " No matching ee-node\n");
return 0;
}
i915_error_printf(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
i915_error_printf(ebuf, " RegListType: %s\n",
datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
i915_error_printf(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
switch (i) {
case GUC_CAPTURE_LIST_TYPE_GLOBAL:
default:
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
i915_error_printf(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
i915_error_printf(ebuf, " i915-Eng-Class: %d\n",
guc_class_to_engine_class(node->eng_class));
break;
case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
if (eng)
GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
else
i915_error_printf(ebuf, " i915-Eng-Lookup Fail!\n");
GCAP_PRINT_GUC_INST_INFO(ebuf, node);
break;
}
numregs = node->reginfo[i].num_regs;
i915_error_printf(ebuf, " NumRegs: %d\n", numregs);
j = 0;
while (numregs--) {
regs = node->reginfo[i].regs;
str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
node->eng_class, 0, regs[j].offset, &is_ext);
if (!str)
i915_error_printf(ebuf, " REG-0x%08x", regs[j].offset);
else
i915_error_printf(ebuf, " %s", str);
if (is_ext)
i915_error_printf(ebuf, "[%ld][%ld]",
FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
i915_error_printf(ebuf, ": 0x%08x\n", regs[j].value);
++j;
}
}
return 0;
}
#endif //CONFIG_DRM_I915_CAPTURE_ERROR
static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
{
struct gcap_reg_list_info *reginfo;
struct guc_mmio_reg *regs;
i915_reg_t reg_ipehr = RING_IPEHR(0);
i915_reg_t reg_instdone = RING_INSTDONE(0);
int i;
if (!ee->guc_capture_node)
return;
reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
regs = reginfo->regs;
for (i = 0; i < reginfo->num_regs; i++) {
if (regs[i].offset == reg_ipehr.reg)
ee->ipehr = regs[i].value;
else if (regs[i].offset == reg_instdone.reg)
ee->instdone.instdone = regs[i].value;
}
}
void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
{
if (!ee || !ee->guc_capture_node)
return;
guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
ee->guc_capture = NULL;
ee->guc_capture_node = NULL;
}
bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
struct intel_context *ce,
struct intel_engine_cs *engine)
{
struct __guc_capture_parsed_output *n;
struct intel_guc *guc;
if (!gt || !ce || !engine)
return false;
guc = &gt->uc.guc;
if (!guc->capture)
return false;
/*
* Look for a matching GuC reported error capture node from
* the internal output link-list based on lrca, guc-id and engine
* identification.
*/
list_for_each_entry(n, &guc->capture->outlist, link) {
if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(engine->guc_id) &&
n->eng_class == GUC_ID_TO_ENGINE_CLASS(engine->guc_id) &&
n->guc_id == ce->guc_id.id &&
(n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
return true;
}
return false;
}
void intel_guc_capture_get_matching_node(struct intel_gt *gt,
struct intel_engine_coredump *ee,
struct intel_context *ce)
{
struct __guc_capture_parsed_output *n, *ntmp;
struct intel_guc *guc;
if (!gt || !ee || !ce)
return;
guc = &gt->uc.guc;
if (!guc->capture)
return;
GEM_BUG_ON(ee->guc_capture_node);
/*
* Look for a matching GuC reported error capture node from
* the internal output link-list based on lrca, guc-id and engine
* identification.
*/
list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
n->guc_id == ce->guc_id.id &&
(n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
list_del(&n->link);
ee->guc_capture_node = n;
ee->guc_capture = guc->capture;
guc_capture_find_ecode(ee);
return;
}
}
guc_warn(guc, "No register capture node found for 0x%04X / 0x%08X\n",
ce->guc_id.id, ce->lrc.lrca);
}
void intel_guc_capture_process(struct intel_guc *guc)
{
if (guc->capture)
__guc_capture_process_output(guc);
}
static void
guc_capture_free_ads_cache(struct intel_guc_state_capture *gc)
{
int i, j, k;
struct __guc_capture_ads_cache *cache;
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
cache = &gc->ads_cache[i][j][k];
if (cache->is_valid)
kfree(cache->ptr);
}
}
}
kfree(gc->ads_null_cache);
}
void intel_guc_capture_destroy(struct intel_guc *guc)
{
if (!guc->capture)
return;
guc_capture_free_ads_cache(guc->capture);
guc_capture_delete_prealloc_nodes(guc);
guc_capture_free_extlists(guc->capture->extlists);
kfree(guc->capture->extlists);
kfree(guc->capture);
guc->capture = NULL;
}
int intel_guc_capture_init(struct intel_guc *guc)
{
guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL);
if (!guc->capture)
return -ENOMEM;
guc->capture->reglists = guc_capture_get_device_reglist(guc);
INIT_LIST_HEAD(&guc->capture->outlist);
INIT_LIST_HEAD(&guc->capture->cachelist);
check_guc_capture_size(guc);
return 0;
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016-2019 Intel Corporation
*/
#include <linux/string_helpers.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_reset.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "gt/intel_rps.h"
#include "intel_uc.h"
#include "i915_drv.h"
#include "i915_hwmon.h"
static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;
static void uc_expand_default_options(struct intel_uc *uc)
{
struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
if (i915->params.enable_guc != -1)
return;
/* Don't enable GuC/HuC on pre-Gen12 */
if (GRAPHICS_VER(i915) < 12) {
i915->params.enable_guc = 0;
return;
}
/* Don't enable GuC/HuC on older Gen12 platforms */
if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
i915->params.enable_guc = 0;
return;
}
/* Intermediate platforms are HuC authentication only */
if (IS_ALDERLAKE_S(i915) && !IS_RAPTORLAKE_S(i915)) {
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
return;
}
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
/* XEHPSDV and PVC do not use HuC */
if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC, providing us with fresh state for both GuC and HuC. */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
struct intel_gt *gt = uc_to_gt(uc);
int ret;
u32 guc_status;
ret = i915_inject_probe_error(gt->i915, -ENXIO);
if (ret)
return ret;
ret = intel_reset_guc(gt);
if (ret) {
gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
return ret;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET),
"GuC status: 0x%x, MIA core expected to be in reset\n",
guc_status);
return ret;
}
static void __confirm_options(struct intel_uc *uc)
{
struct intel_gt *gt = uc_to_gt(uc);
struct drm_i915_private *i915 = gt->i915;
gt_dbg(gt, "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
i915->params.enable_guc,
str_yes_no(intel_uc_wants_guc(uc)),
str_yes_no(intel_uc_wants_guc_submission(uc)),
str_yes_no(intel_uc_wants_huc(uc)),
str_yes_no(intel_uc_wants_guc_slpc(uc)));
if (i915->params.enable_guc == 0) {
GEM_BUG_ON(intel_uc_wants_guc(uc));
GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
GEM_BUG_ON(intel_uc_wants_huc(uc));
GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
return;
}
if (!intel_uc_supports_guc(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "HuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC submission is N/A");
if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "undocumented flag");
}
void intel_uc_init_early(struct intel_uc *uc)
{
uc_expand_default_options(uc);
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
intel_gsc_uc_init_early(&uc->gsc);
__confirm_options(uc);
if (intel_uc_wants_guc(uc))
uc->ops = &uc_ops_on;
else
uc->ops = &uc_ops_off;
}
void intel_uc_init_late(struct intel_uc *uc)
{
intel_guc_init_late(&uc->guc);
intel_gsc_uc_load_start(&uc->gsc);
}
void intel_uc_driver_late_release(struct intel_uc *uc)
{
}
/**
* intel_uc_init_mmio - setup uC MMIO access
* @uc: the intel_uc structure
*
* Setup minimal state necessary for MMIO accesses later in the
* initialization sequence.
*/
void intel_uc_init_mmio(struct intel_uc *uc)
{
intel_guc_init_send_regs(&uc->guc);
}
static void __uc_capture_load_err_log(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
if (guc->log.vma && !uc->load_err_log)
uc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
}
static void __uc_free_load_err_log(struct intel_uc *uc)
{
struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log);
if (log)
i915_gem_object_put(log);
}
void intel_uc_driver_remove(struct intel_uc *uc)
{
intel_uc_fini_hw(uc);
intel_uc_fini(uc);
__uc_free_load_err_log(uc);
}
/*
* Events triggered while CT buffers are disabled are logged in the SCRATCH_15
* register using the same bits used in the CT message payload. Since our
* communication channel with guc is turned off at this point, we can save the
* message and handle it after we turn it back on.
*/
static void guc_clear_mmio_msg(struct intel_guc *guc)
{
intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
}
static void guc_get_mmio_msg(struct intel_guc *guc)
{
u32 val;
spin_lock_irq(&guc->irq_lock);
val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
guc->mmio_msg |= val & guc->msg_enabled_mask;
/*
* clear all events, including the ones we're not currently servicing,
* to make sure we don't try to process a stale message if we enable
* handling of more events later.
*/
guc_clear_mmio_msg(guc);
spin_unlock_irq(&guc->irq_lock);
}
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
/* we need communication to be enabled to reply to GuC */
GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));
spin_lock_irq(&guc->irq_lock);
if (guc->mmio_msg) {
intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
guc->mmio_msg = 0;
}
spin_unlock_irq(&guc->irq_lock);
}
static int guc_enable_communication(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_private *i915 = gt->i915;
int ret;
GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));
ret = i915_inject_probe_error(i915, -ENXIO);
if (ret)
return ret;
ret = intel_guc_ct_enable(&guc->ct);
if (ret)
return ret;
/* check for mmio messages received before/during the CT enable */
guc_get_mmio_msg(guc);
guc_handle_mmio_msg(guc);
intel_guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
spin_lock_irq(gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
spin_unlock_irq(gt->irq_lock);
guc_dbg(guc, "communication enabled\n");
return 0;
}
static void guc_disable_communication(struct intel_guc *guc)
{
/*
* Events generated during or after CT disable are logged by the GuC
* via mmio. Make sure the register is clear before disabling CT since
* all events we cared about have already been processed via CT.
*/
guc_clear_mmio_msg(guc);
intel_guc_disable_interrupts(guc);
intel_guc_ct_disable(&guc->ct);
/*
* Check for messages received during/after the CT disable. We do not
* expect any messages to have arrived via CT between the interrupt
* disable and the CT disable because GuC should've been idle until we
* triggered the CT disable protocol.
*/
guc_get_mmio_msg(guc);
guc_dbg(guc, "communication disabled\n");
}
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
struct intel_gt *gt = uc_to_gt(uc);
int err;
GEM_BUG_ON(!intel_uc_wants_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
if (err) {
/* Make sure we transition out of transient "SELECTED" state */
if (intel_uc_wants_huc(uc)) {
gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err));
intel_uc_fw_change_status(&uc->huc.fw,
INTEL_UC_FIRMWARE_ERROR);
}
if (intel_uc_wants_gsc_uc(uc)) {
gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err));
intel_uc_fw_change_status(&uc->gsc.fw,
INTEL_UC_FIRMWARE_ERROR);
}
return;
}
if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
if (intel_uc_wants_gsc_uc(uc))
intel_uc_fw_fetch(&uc->gsc.fw);
}
static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
intel_uc_fw_cleanup_fetch(&uc->huc.fw);
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
static int __uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
GEM_BUG_ON(!intel_uc_wants_guc(uc));
if (!intel_uc_uses_guc(uc))
return 0;
if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
return -ENOMEM;
ret = intel_guc_init(guc);
if (ret)
return ret;
if (intel_uc_uses_huc(uc))
intel_huc_init(huc);
if (intel_uc_uses_gsc_uc(uc))
intel_gsc_uc_init(&uc->gsc);
return 0;
}
static void __uc_fini(struct intel_uc *uc)
{
intel_gsc_uc_fini(&uc->gsc);
intel_huc_fini(&uc->huc);
intel_guc_fini(&uc->guc);
}
static int __uc_sanitize(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
GEM_BUG_ON(!intel_uc_supports_guc(uc));
intel_huc_sanitize(huc);
intel_guc_sanitize(guc);
return __intel_uc_reset_hw(uc);
}
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
struct intel_gt *gt = uc_to_gt(uc);
struct intel_uncore *uncore = gt->uncore;
u32 base = intel_wopcm_guc_base(&gt->wopcm);
u32 size = intel_wopcm_guc_size(&gt->wopcm);
u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
u32 mask;
int err;
if (unlikely(!base || !size)) {
gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n");
return -E2BIG;
}
GEM_BUG_ON(!intel_uc_supports_guc(uc));
GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
size | GUC_WOPCM_SIZE_LOCKED);
if (err)
goto err_out;
mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
base | huc_agent, mask,
base | huc_agent |
GUC_WOPCM_OFFSET_VALID);
if (err)
goto err_out;
return 0;
err_out:
gt_probe_error(gt, "Failed to init uC WOPCM registers!\n");
gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
i915_mmio_reg_offset(GUC_WOPCM_SIZE),
intel_uncore_read(uncore, GUC_WOPCM_SIZE));
return err;
}
static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
struct intel_gt *gt = uc_to_gt(uc);
struct intel_uncore *uncore = gt->uncore;
return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
(intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}
static int __uc_check_hw(struct intel_uc *uc)
{
if (uc->fw_table_invalid)
return -EIO;
if (!intel_uc_supports_guc(uc))
return 0;
/*
* We can silently continue without GuC only if it was never enabled
* before on this system after reboot, otherwise we risk GPU hangs.
* To check if GuC was loaded before, we look at the WOPCM registers.
*/
if (uc_is_wopcm_locked(uc))
return -EIO;
return 0;
}
static void print_fw_ver(struct intel_gt *gt, struct intel_uc_fw *fw)
{
gt_info(gt, "%s firmware %s version %u.%u.%u\n",
intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
fw->file_selected.ver.major,
fw->file_selected.ver.minor,
fw->file_selected.ver.patch);
}
static int __uc_init_hw(struct intel_uc *uc)
{
struct intel_gt *gt = uc_to_gt(uc);
struct drm_i915_private *i915 = gt->i915;
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret, attempts;
bool pl1en = false;
GEM_BUG_ON(!intel_uc_supports_guc(uc));
GEM_BUG_ON(!intel_uc_wants_guc(uc));
print_fw_ver(gt, &guc->fw);
if (intel_uc_uses_huc(uc))
print_fw_ver(gt, &huc->fw);
if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
intel_uc_wants_guc_submission(uc) ?
intel_uc_fw_status_to_error(guc->fw.status) : 0;
goto err_out;
}
ret = uc_init_wopcm(uc);
if (ret)
goto err_out;
intel_guc_reset_interrupts(guc);
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
if (GRAPHICS_VER(i915) == 9)
attempts = 3;
else
attempts = 1;
/* Disable a potentially low PL1 power limit to allow freq to be raised */
i915_hwmon_power_max_disable(gt->i915, &pl1en);
intel_rps_raise_unslice(&uc_to_gt(uc)->rps);
while (attempts--) {
/*
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
ret = __uc_sanitize(uc);
if (ret)
goto err_rps;
intel_huc_fw_upload(huc);
intel_guc_ads_reset(guc);
intel_guc_write_params(guc);
ret = intel_guc_fw_upload(guc);
if (ret == 0)
break;
gt_dbg(gt, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
ERR_PTR(ret), attempts);
}
/* Did we succeed or run out of retries? */
if (ret)
goto err_log_capture;
ret = guc_enable_communication(guc);
if (ret)
goto err_log_capture;
/*
* GSC-loaded HuC is authenticated by the GSC, so we don't need to
* trigger the auth here. However, given that the HuC loaded this way
* survives GT reset, we still need to update our SW bookkeeping to make
* sure it reflects the correct HW status.
*/
if (intel_huc_is_loaded_by_gsc(huc))
intel_huc_update_auth_status(huc);
else
intel_huc_auth(huc, INTEL_HUC_AUTH_BY_GUC);
if (intel_uc_uses_guc_submission(uc)) {
ret = intel_guc_submission_enable(guc);
if (ret)
goto err_log_capture;
}
if (intel_uc_uses_guc_slpc(uc)) {
ret = intel_guc_slpc_enable(&guc->slpc);
if (ret)
goto err_submission;
} else {
/* Restore GT back to RPn for non-SLPC path */
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
i915_hwmon_power_max_restore(gt->i915, pl1en);
guc_info(guc, "submission %s\n", str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
guc_info(guc, "SLPC %s\n", str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
return 0;
/*
* We've failed to load the firmware :(
*/
err_submission:
intel_guc_submission_disable(guc);
err_log_capture:
__uc_capture_load_err_log(uc);
err_rps:
/* Return GT back to RPn */
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
i915_hwmon_power_max_restore(gt->i915, pl1en);
err_out:
__uc_sanitize(uc);
if (!ret) {
gt_notice(gt, "GuC is uninitialized\n");
/* We want to run without GuC submission */
return 0;
}
gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret));
/* We want to keep KMS alive */
return -EIO;
}
static void __uc_fini_hw(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
if (!intel_guc_is_fw_running(guc))
return;
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_disable(guc);
__uc_sanitize(uc);
}
/**
* intel_uc_reset_prepare - Prepare for reset
* @uc: the intel_uc structure
*
* Preparing for full gpu reset.
*/
void intel_uc_reset_prepare(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
uc->reset_in_progress = true;
/* Nothing to do if GuC isn't supported */
if (!intel_uc_supports_guc(uc))
return;
/* Firmware expected to be running when this function is called */
if (!intel_guc_is_ready(guc))
goto sanitize;
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_reset_prepare(guc);
sanitize:
__uc_sanitize(uc);
}
void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
struct intel_guc *guc = &uc->guc;
/* Firmware can not be running when this function is called */
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_reset(guc, stalled);
}
void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
intel_guc_submission_reset_finish(guc);
}
void intel_uc_cancel_requests(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
/* Firmware can not be running when this function is called */
if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_cancel_requests(guc);
}
void intel_uc_runtime_suspend(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
return;
}
/*
* Wait for any outstanding CTB before tearing down communication with the
* GuC.
*/
#define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5)
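/* i.e. 200ms, independent of the CONFIG_HZ setting */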
intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
guc_disable_communication(guc);
}
void intel_uc_suspend(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
intel_wakeref_t wakeref;
int err;
/* flush the GSC worker */
intel_gsc_uc_flush_work(&uc->gsc);
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
return;
}
with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
err = intel_guc_suspend(guc);
if (err)
guc_dbg(guc, "Failed to suspend, %pe", ERR_PTR(err));
}
}
static void __uc_resume_mappings(struct intel_uc *uc)
{
intel_uc_fw_resume_mapping(&uc->guc.fw);
intel_uc_fw_resume_mapping(&uc->huc.fw);
}
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
struct intel_guc *guc = &uc->guc;
struct intel_gt *gt = guc_to_gt(guc);
int err;
if (!intel_guc_is_fw_running(guc))
return 0;
/* Make sure we enable communication if and only if it's disabled */
GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));
if (enable_communication)
guc_enable_communication(guc);
/*
* If we are only resuming GuC communication but not reloading
* GuC, we need to ensure the ARAT timer interrupt is enabled
* again. In case of GuC reload, it is enabled during SLPC enable.
*/
if (enable_communication && intel_uc_uses_guc_slpc(uc))
intel_guc_pm_intrmsk_enable(gt);
err = intel_guc_resume(guc);
if (err) {
guc_dbg(guc, "Failed to resume, %pe", ERR_PTR(err));
return err;
}
intel_gsc_uc_resume(&uc->gsc);
return 0;
}
int intel_uc_resume(struct intel_uc *uc)
{
/*
* When coming out of S3/S4 we sanitize and re-init the HW, so
* communication is already re-enabled at this point.
*/
return __uc_resume(uc, false);
}
int intel_uc_runtime_resume(struct intel_uc *uc)
{
/*
* During runtime resume we don't sanitize, so we need to re-init
* communication as well.
*/
return __uc_resume(uc, true);
}
static const struct intel_uc_ops uc_ops_off = {
.init_hw = __uc_check_hw,
.fini = __uc_fini, /* to clean up the init_early initialization */
};
static const struct intel_uc_ops uc_ops_on = {
.sanitize = __uc_sanitize,
.init_fw = __uc_fetch_firmwares,
.fini_fw = __uc_cleanup_firmwares,
.init = __uc_init,
.fini = __uc_fini,
.init_hw = __uc_init_hw,
.fini_hw = __uc_fini_hw,
.resume_mappings = __uc_resume_mappings,
};
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_uc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <linux/string_helpers.h>
#include "intel_guc_rc.h"
#include "intel_guc_print.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
static bool __guc_rc_supported(struct intel_guc *guc)
{
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
{
if (!intel_guc_rc_is_supported(guc))
return false;
return guc->submission_selected;
}
void intel_guc_rc_init_early(struct intel_guc *guc)
{
guc->rc_supported = __guc_rc_supported(guc);
guc->rc_selected = __guc_rc_selected(guc);
}
static int guc_action_control_gucrc(struct intel_guc *guc, bool enable)
{
u32 rc_mode = enable ? INTEL_GUCRC_FIRMWARE_CONTROL :
INTEL_GUCRC_HOST_CONTROL;
u32 action[] = {
INTEL_GUC_ACTION_SETUP_PC_GUCRC,
rc_mode
};
int ret;
ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
ret = ret > 0 ? -EPROTO : ret;
return ret;
}
static int __guc_rc_control(struct intel_guc *guc, bool enable)
{
struct intel_gt *gt = guc_to_gt(guc);
int ret;
if (!intel_uc_uses_guc_rc(&gt->uc))
return -EOPNOTSUPP;
if (!intel_guc_is_ready(guc))
return -EINVAL;
ret = guc_action_control_gucrc(guc, enable);
if (ret) {
guc_probe_error(guc, "Failed to %s RC (%pe)\n",
str_enable_disable(enable), ERR_PTR(ret));
return ret;
}
guc_info(guc, "RC %s\n", str_enabled_disabled(enable));
return 0;
}
int intel_guc_rc_enable(struct intel_guc *guc)
{
return __guc_rc_control(guc, true);
}
int intel_guc_rc_disable(struct intel_guc *guc)
{
return __guc_rc_control(guc, false);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2016-2019 Intel Corporation
*/
#include <linux/bitfield.h>
#include <linux/firmware.h>
#include <linux/highmem.h>
#include <drm/drm_cache.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
#include "i915_reg.h"
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define UNEXPECTED gt_probe_error
#else
#define UNEXPECTED gt_notice
#endif
static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
GEM_BUG_ON(type >= INTEL_UC_FW_NUM_TYPES);
switch (type) {
case INTEL_UC_FW_TYPE_GUC:
return container_of(uc_fw, struct intel_gt, uc.guc.fw);
case INTEL_UC_FW_TYPE_HUC:
return container_of(uc_fw, struct intel_gt, uc.huc.fw);
case INTEL_UC_FW_TYPE_GSC:
return container_of(uc_fw, struct intel_gt, uc.gsc.fw);
}
return NULL;
}
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
{
GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
return ____uc_fw_to_gt(uc_fw, uc_fw->type);
}
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status)
{
uc_fw->__status = status;
gt_dbg(__uc_fw_to_gt(uc_fw), "%s firmware -> %s\n",
intel_uc_fw_type_repr(uc_fw->type),
status == INTEL_UC_FIRMWARE_SELECTED ?
uc_fw->file_selected.path : intel_uc_fw_status_repr(status));
}
#endif
/*
* List of required GuC and HuC binaries per-platform.
* Must be ordered based on platform + revid, from newer to older.
*
* Note that RKL and ADL-S have the same GuC/HuC device ID's and use the same
* firmware as TGL.
*
* Version numbers:
* Originally, the driver required an exact-match major/minor/patch firmware
* file and only supported that one version for any given platform. However,
* the new direction from upstream is to be backwards compatible with all
* prior releases and to be as flexible as possible as to what firmware is
* loaded.
*
* For GuC, the major version number signifies a backwards breaking API change.
* So, new format GuC firmware files are labelled by their major version only.
* For HuC, there is no KMD interaction, hence no version matching requirement.
* So, new format HuC firmware files have no version number at all.
*
* All of which means that the table below must keep all old format files with
* full three point version number. But newer files have reduced requirements.
* Having said that, the driver still needs to track the minor version number
* for GuC at least, as it is useful to report to the user that they are not
* running with a recent enough version for all KMD supported features,
* security fixes, etc. to be enabled.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
fw_def(METEORLAKE, 0, guc_maj(mtl, 70, 6, 6)) \
fw_def(DG2, 0, guc_maj(dg2, 70, 5, 1)) \
fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5, 1)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5, 1)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
fw_def(DG1, 0, guc_maj(dg1, 70, 5, 1)) \
fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
fw_def(ELKHARTLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
fw_def(ICELAKE, 0, guc_mmp(icl, 70, 1, 1)) \
fw_def(COMETLAKE, 5, guc_mmp(cml, 70, 1, 1)) \
fw_def(COMETLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
fw_def(COFFEELAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
fw_def(GEMINILAKE, 0, guc_mmp(glk, 70, 1, 1)) \
fw_def(KABYLAKE, 0, guc_mmp(kbl, 70, 1, 1)) \
fw_def(BROXTON, 0, guc_mmp(bxt, 70, 1, 1)) \
fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp, huc_gsc) \
fw_def(METEORLAKE, 0, huc_gsc(mtl)) \
fw_def(DG2, 0, huc_gsc(dg2)) \
fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
fw_def(DG1, 0, huc_raw(dg1)) \
fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
fw_def(ELKHARTLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, huc_mmp(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, huc_mmp(cml, 4, 0, 0)) \
fw_def(COMETLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, huc_mmp(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, huc_mmp(kbl, 4, 0, 0)) \
fw_def(BROXTON, 0, huc_mmp(bxt, 2, 0, 0)) \
fw_def(SKYLAKE, 0, huc_mmp(skl, 2, 0, 0))
/*
* Set of macros for producing a list of filenames from the above table.
*/
#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
"i915/" \
__stringify(prefix_) "_" name_ ".bin"
#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
"i915/" \
__stringify(prefix_) "_" name_ "_" \
__stringify(major_) ".bin"
#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
"i915/" \
__stringify(prefix_) "_" name_ "_" \
__stringify(major_) "." \
__stringify(minor_) "." \
__stringify(patch_) ".bin"
/* Minor for internal driver use, not part of file name */
#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_) \
__MAKE_UC_FW_PATH_MAJOR(prefix_, "guc", major_)
#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
__MAKE_UC_FW_PATH_MMP(prefix_, "guc", major_, minor_, patch_)
#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
__MAKE_UC_FW_PATH_BLANK(prefix_, "huc")
#define MAKE_HUC_FW_PATH_GSC(prefix_) \
__MAKE_UC_FW_PATH_BLANK(prefix_, "huc_gsc")
#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
__MAKE_UC_FW_PATH_MMP(prefix_, "huc", major_, minor_, patch_)
/*
* All blobs need to be declared via MODULE_FIRMWARE().
* This first expansion of the table macros is solely to provide
* that declaration.
*/
#define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
MODULE_FIRMWARE(uc_);
INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP, MAKE_HUC_FW_PATH_GSC)
/*
* The next expansion of the table macros (in __uc_fw_auto_select below) provides
* actual data structures with both the filename and the version information.
* These structure arrays are then iterated over to build the list of suitable
* files for the current platform and to then attempt to load those files, in
* the order listed, until one is successfully found.
*/
struct __packed uc_fw_blob {
const char *path;
bool legacy;
u8 major;
u8 minor;
u8 patch;
bool has_gsc_headers;
};
#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
.major = major_, \
.minor = minor_, \
.patch = patch_, \
.path = path_,
#define UC_FW_BLOB_NEW(major_, minor_, patch_, gsc_, path_) \
{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
.legacy = false, .has_gsc_headers = gsc_ }
#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
.legacy = true }
#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
UC_FW_BLOB_NEW(major_, minor_, patch_, false, \
MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_, patch_))
#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
UC_FW_BLOB_OLD(major_, minor_, patch_, \
MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
#define HUC_FW_BLOB(prefix_) \
UC_FW_BLOB_NEW(0, 0, 0, false, MAKE_HUC_FW_PATH_BLANK(prefix_))
#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
UC_FW_BLOB_OLD(major_, minor_, patch_, \
MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
#define HUC_FW_BLOB_GSC(prefix_) \
UC_FW_BLOB_NEW(0, 0, 0, true, MAKE_HUC_FW_PATH_GSC(prefix_))
struct __packed uc_fw_platform_requirement {
enum intel_platform p;
u8 rev; /* first platform rev using this FW */
const struct uc_fw_blob blob;
};
#define MAKE_FW_LIST(platform_, revid_, uc_) \
{ \
.p = INTEL_##platform_, \
.rev = revid_, \
.blob = uc_, \
},
struct fw_blobs_by_type {
const struct uc_fw_platform_requirement *blobs;
u32 count;
};
static const struct uc_fw_platform_requirement blobs_guc[] = {
INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
};
static const struct uc_fw_platform_requirement blobs_huc[] = {
INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP, HUC_FW_BLOB_GSC)
};
static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
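/*
* Note that blobs_all has no entry for INTEL_UC_FW_TYPE_GSC;
* __uc_fw_auto_select() below returns early for the GSC, so in this
* selection path a GSC binary is only picked up via the
* gsc_firmware_path override.
*/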
static void
__uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
u8 rev = INTEL_REVID(i915);
int i;
bool found;
/*
* GSC FW support is still not fully in place, so we're not defining
* the FW blob yet because we don't want the driver to attempt to load
* it until we're ready for it.
*/
if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
return;
/*
* The only difference between the ADL GuC FWs is the HWConfig support.
* ADL-N does not support HWConfig, so we should use the same binary as
* ADL-S, otherwise the GuC might attempt to fetch a config table that
* does not exist.
*/
if (IS_ALDERLAKE_P_N(i915))
p = INTEL_ALDERLAKE_S;
GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
fw_blobs = blobs_all[uc_fw->type].blobs;
fw_count = blobs_all[uc_fw->type].count;
found = false;
for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
const struct uc_fw_blob *blob = &fw_blobs[i].blob;
if (p != fw_blobs[i].p)
continue;
if (rev < fw_blobs[i].rev)
continue;
if (uc_fw->file_selected.path) {
/*
* Continuing an earlier search after a found blob failed to load.
* Once the previously chosen path has been found, clear it out
* and let the search continue from there.
*/
if (uc_fw->file_selected.path == blob->path)
uc_fw->file_selected.path = NULL;
continue;
}
uc_fw->file_selected.path = blob->path;
uc_fw->file_wanted.path = blob->path;
uc_fw->file_wanted.ver.major = blob->major;
uc_fw->file_wanted.ver.minor = blob->minor;
uc_fw->file_wanted.ver.patch = blob->patch;
uc_fw->has_gsc_headers = blob->has_gsc_headers;
found = true;
break;
}
if (!found && uc_fw->file_selected.path) {
/* Failed to find a match for the last attempt?! */
uc_fw->file_selected.path = NULL;
}
}
static bool validate_fw_table_type(struct drm_i915_private *i915, enum intel_uc_fw_type type)
{
const struct uc_fw_platform_requirement *fw_blobs;
u32 fw_count;
int i, j;
if (type >= ARRAY_SIZE(blobs_all)) {
drm_err(&i915->drm, "No blob array for %s\n", intel_uc_fw_type_repr(type));
return false;
}
fw_blobs = blobs_all[type].blobs;
fw_count = blobs_all[type].count;
if (!fw_count)
return true;
/* make sure the list is ordered as expected */
for (i = 1; i < fw_count; i++) {
/* Versionless file names must be unique per platform: */
for (j = i + 1; j < fw_count; j++) {
/* Same platform? */
if (fw_blobs[i].p != fw_blobs[j].p)
continue;
if (fw_blobs[i].blob.path != fw_blobs[j].blob.path)
continue;
drm_err(&i915->drm, "Duplicate %s blobs: %s r%u %s%d.%d.%d [%s] matches %s%d.%d.%d [%s]\n",
intel_uc_fw_type_repr(type),
intel_platform_name(fw_blobs[j].p), fw_blobs[j].rev,
fw_blobs[j].blob.legacy ? "L" : "v",
fw_blobs[j].blob.major, fw_blobs[j].blob.minor,
fw_blobs[j].blob.patch, fw_blobs[j].blob.path,
fw_blobs[i].blob.legacy ? "L" : "v",
fw_blobs[i].blob.major, fw_blobs[i].blob.minor,
fw_blobs[i].blob.patch, fw_blobs[i].blob.path);
}
/* Next platform is good: */
if (fw_blobs[i].p < fw_blobs[i - 1].p)
continue;
/* Next platform revision is good: */
if (fw_blobs[i].p == fw_blobs[i - 1].p &&
fw_blobs[i].rev < fw_blobs[i - 1].rev)
continue;
/* Platform/revision must be in order: */
if (fw_blobs[i].p != fw_blobs[i - 1].p ||
fw_blobs[i].rev != fw_blobs[i - 1].rev)
goto bad;
/* Next major version is good: */
if (fw_blobs[i].blob.major < fw_blobs[i - 1].blob.major)
continue;
/* New must be before legacy: */
if (!fw_blobs[i].blob.legacy && fw_blobs[i - 1].blob.legacy)
goto bad;
/* New to legacy also means 0.0 to X.Y (HuC), or X.0 to X.Y (GuC) */
if (fw_blobs[i].blob.legacy && !fw_blobs[i - 1].blob.legacy) {
if (!fw_blobs[i - 1].blob.major)
continue;
if (fw_blobs[i].blob.major == fw_blobs[i - 1].blob.major)
continue;
}
/* Major versions must be in order: */
if (fw_blobs[i].blob.major != fw_blobs[i - 1].blob.major)
goto bad;
/* Next minor version is good: */
if (fw_blobs[i].blob.minor < fw_blobs[i - 1].blob.minor)
continue;
/* Minor versions must be in order: */
if (fw_blobs[i].blob.minor != fw_blobs[i - 1].blob.minor)
goto bad;
/* Patch versions must be in order and unique: */
if (fw_blobs[i].blob.patch < fw_blobs[i - 1].blob.patch)
continue;
bad:
drm_err(&i915->drm, "Invalid %s blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
intel_uc_fw_type_repr(type),
intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
fw_blobs[i - 1].blob.legacy ? "L" : "v",
fw_blobs[i - 1].blob.major,
fw_blobs[i - 1].blob.minor,
fw_blobs[i - 1].blob.patch,
intel_platform_name(fw_blobs[i].p), fw_blobs[i].rev,
fw_blobs[i].blob.legacy ? "L" : "v",
fw_blobs[i].blob.major,
fw_blobs[i].blob.minor,
fw_blobs[i].blob.patch);
return false;
}
return true;
}
static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
{
if (i915->params.enable_guc & ENABLE_GUC_MASK)
return i915->params.guc_firmware_path;
return "";
}
static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
{
if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
return i915->params.huc_firmware_path;
return "";
}
static const char *__override_gsc_firmware_path(struct drm_i915_private *i915)
{
return i915->params.gsc_firmware_path;
}
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
const char *path = NULL;
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_GUC:
path = __override_guc_firmware_path(i915);
break;
case INTEL_UC_FW_TYPE_HUC:
path = __override_huc_firmware_path(i915);
break;
case INTEL_UC_FW_TYPE_GSC:
path = __override_gsc_firmware_path(i915);
break;
}
if (unlikely(path)) {
uc_fw->file_selected.path = path;
uc_fw->user_overridden = true;
}
}
void intel_uc_fw_version_from_gsc_manifest(struct intel_uc_fw_ver *ver,
const void *data)
{
const struct intel_gsc_manifest_header *manifest = data;
ver->major = manifest->fw_version.major;
ver->minor = manifest->fw_version.minor;
ver->patch = manifest->fw_version.hotfix;
ver->build = manifest->fw_version.build;
}
/**
* intel_uc_fw_init_early - initialize the uC object and select the firmware
* @uc_fw: uC firmware
* @type: type of uC
* @needs_ggtt_mapping: whether the FW needs to be GGTT mapped for loading
*
* Initialize the state of our uC object and relevant tracking and select the
* firmware to fetch and load.
*/
void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_type type,
bool needs_ggtt_mapping)
{
struct intel_gt *gt = ____uc_fw_to_gt(uc_fw, type);
struct drm_i915_private *i915 = gt->i915;
/*
* we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
* before we've looked at the HW caps to see if we have uc support
*/
BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
GEM_BUG_ON(uc_fw->status);
GEM_BUG_ON(uc_fw->file_selected.path);
uc_fw->type = type;
uc_fw->needs_ggtt_mapping = needs_ggtt_mapping;
if (HAS_GT_UC(i915)) {
if (!validate_fw_table_type(i915, type)) {
gt->uc.fw_table_invalid = true;
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
return;
}
__uc_fw_auto_select(i915, uc_fw);
__uc_fw_user_override(i915, uc_fw);
}
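/*
* In the nested ternary below: a NULL path means the firmware is not
* supported on this platform, an empty string means it was explicitly
* disabled, and a non-empty path means a blob was selected.
*/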
intel_uc_fw_change_status(uc_fw, uc_fw->file_selected.path ? *uc_fw->file_selected.path ?
INTEL_UC_FIRMWARE_SELECTED :
INTEL_UC_FIRMWARE_DISABLED :
INTEL_UC_FIRMWARE_NOT_SUPPORTED);
}
static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
{
struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
bool user = e == -EINVAL;
if (i915_inject_probe_error(i915, e)) {
/* non-existing blob */
uc_fw->file_selected.path = "<invalid>";
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
uc_fw->file_wanted.ver.major += 1;
uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
uc_fw->file_wanted.ver.minor += 1;
uc_fw->user_overridden = user;
} else if (uc_fw->file_wanted.ver.major &&
i915_inject_probe_error(i915, e)) {
/* require prev major version */
uc_fw->file_wanted.ver.major -= 1;
uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = user;
} else if (uc_fw->file_wanted.ver.minor &&
i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
uc_fw->file_wanted.ver.minor -= 1;
uc_fw->user_overridden = user;
} else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
uc_fw->file_wanted.ver.major = 0;
uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = true;
}
}
static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value)
{
/* Get version numbers from the CSS header */
ver->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css_value);
ver->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css_value);
ver->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css_value);
}
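/*
* Example, assuming the usual 8-bits-per-component packing defined by the
* CSS_SW_VERSION_UC_* masks: a css_value of 0x00460701 unpacks to
* version 70.7.1.
*/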
static void guc_read_css_info(struct intel_uc_fw *uc_fw, struct uc_css_header *css)
{
struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
/*
* The GuC firmware includes an extra version number to specify the
* submission API level. This allows submission code to work with
* multiple GuC versions without having to know the absolute firmware
* version number (there are likely to be multiple firmware releases
* which all support the same submission API level).
*
* Note that the spec for the CSS header defines this version number
* as 'vf_version' as it was originally intended for virtualisation.
* However, it is applicable to native submission as well.
*
* Unfortunately, due to an oversight, this version number was only
* exposed in the CSS header from v70.6.0.
*/
if (uc_fw->file_selected.ver.major >= 70) {
if (uc_fw->file_selected.ver.minor >= 6) {
/* v70.6.0 adds CSS header support */
uc_unpack_css_version(&guc->submission_version, css->vf_version);
} else if (uc_fw->file_selected.ver.minor >= 3) {
/* v70.3.0 introduced v1.1.0 */
guc->submission_version.major = 1;
guc->submission_version.minor = 1;
guc->submission_version.patch = 0;
} else {
/* v70.0.0 introduced v1.0.0 */
guc->submission_version.major = 1;
guc->submission_version.minor = 0;
guc->submission_version.patch = 0;
}
} else if (uc_fw->file_selected.ver.major >= 69) {
/* v69.0.0 introduced v0.10.0 */
guc->submission_version.major = 0;
guc->submission_version.minor = 10;
guc->submission_version.patch = 0;
} else {
/* Prior versions were v0.1.0 */
guc->submission_version.major = 0;
guc->submission_version.minor = 1;
guc->submission_version.patch = 0;
}
uc_fw->private_data_size = css->private_data_size;
}
static int __check_ccs_header(struct intel_gt *gt,
const void *fw_data, size_t fw_size,
struct intel_uc_fw *uc_fw)
{
struct uc_css_header *css;
size_t size;
/* Check the size of the blob before examining buffer contents */
if (unlikely(fw_size < sizeof(struct uc_css_header))) {
gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw_size, sizeof(struct uc_css_header));
return -ENODATA;
}
css = (struct uc_css_header *)fw_data;
/* Check integrity of size values inside CSS header */
size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
css->exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
gt_warn(gt, "%s firmware %s: unexpected header size: %zu != %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw_size, sizeof(struct uc_css_header));
return -EPROTO;
}
/* uCode size must be calculated from the other sizes */
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* At a minimum it should contain the header, uCode and RSA; check the combined size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
if (unlikely(fw_size < size)) {
gt_warn(gt, "%s firmware %s: invalid size: %zu < %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
fw_size, size);
return -ENOEXEC;
}
/* Sanity check that this fw is not larger than the whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
if (unlikely(size >= gt->wopcm.size)) {
gt_warn(gt, "%s firmware %s: invalid size: %zu > %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
size, (size_t)gt->wopcm.size);
return -E2BIG;
}
uc_unpack_css_version(&uc_fw->file_selected.ver, css->sw_version);
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
guc_read_css_info(uc_fw, css);
return 0;
}
static int check_gsc_manifest(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_HUC:
intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
break;
case INTEL_UC_FW_TYPE_GSC:
intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
break;
default:
MISSING_CASE(uc_fw->type);
return -EINVAL;
}
if (uc_fw->dma_start_offset) {
u32 delta = uc_fw->dma_start_offset;
__check_ccs_header(gt, fw->data + delta, fw->size - delta, uc_fw);
}
return 0;
}
static int check_ccs_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
return __check_ccs_header(gt, fw->data, fw->size, uc_fw);
}
static bool is_ver_8bit(struct intel_uc_fw_ver *ver)
{
return ver->major < 0xFF && ver->minor < 0xFF && ver->patch < 0xFF;
}
static int guc_check_version_range(struct intel_uc_fw *uc_fw)
{
struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
/*
* GuC version number components are defined as being 8-bits.
* The submission code relies on this to optimise version comparison
* tests. So enforce the restriction here.
*/
if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
gt_warn(gt, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
intel_uc_fw_type_repr(uc_fw->type),
uc_fw->file_selected.ver.major,
uc_fw->file_selected.ver.minor,
uc_fw->file_selected.ver.patch);
return -EINVAL;
}
if (!is_ver_8bit(&guc->submission_version)) {
gt_warn(gt, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
intel_uc_fw_type_repr(uc_fw->type),
guc->submission_version.major,
guc->submission_version.minor,
guc->submission_version.patch);
return -EINVAL;
}
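/* 0 in normal operation; only fails when probe error injection is active */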
return i915_inject_probe_error(gt->i915, -EINVAL);
}
static int check_fw_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
int err = 0;
if (uc_fw->has_gsc_headers)
err = check_gsc_manifest(gt, fw, uc_fw);
else
err = check_ccs_header(gt, fw, uc_fw);
if (err)
return err;
return 0;
}
static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct device *dev = gt->i915->drm.dev;
int err;
err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);
if (err)
return err;
if (uc_fw->needs_ggtt_mapping && (*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
gt_err(gt, "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
(*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
/* try to find another blob to load */
release_firmware(*fw);
*fw = NULL;
return -ENOENT;
}
return 0;
}
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
struct intel_uc_fw_file *huc_selected)
{
struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
bool new_huc, new_guc;
/* we can only do this check after having fetched both GuC and HuC */
GEM_BUG_ON(!huc_selected->path || !guc_selected->path);
/*
* Due to changes in the authentication flow for MTL, HuC 8.5.1 or newer
* requires GuC 70.7.0 or newer. Older HuC binaries will instead require
* GuC < 70.7.0.
*/
new_huc = huc_ver->major > 8 ||
(huc_ver->major == 8 && huc_ver->minor > 5) ||
(huc_ver->major == 8 && huc_ver->minor == 5 && huc_ver->patch >= 1);
new_guc = guc_ver->major > 70 ||
(guc_ver->major == 70 && guc_ver->minor >= 7);
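/*
* Illustrative pairings per the checks above: HuC 8.5.1 with GuC 70.7.0
* is accepted, while HuC 8.5.0 with GuC 70.7.0, or HuC 8.5.1 with
* GuC 70.6.x, is rejected as a mismatch.
*/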
if (new_huc != new_guc) {
UNEXPECTED(gt, "HuC %u.%u.%u is incompatible with GuC %u.%u.%u\n",
huc_ver->major, huc_ver->minor, huc_ver->patch,
guc_ver->major, guc_ver->minor, guc_ver->patch);
gt_info(gt, "MTL GuC 70.7.0+ and HuC 8.5.1+ don't work with older releases\n");
return -ENOEXEC;
}
return 0;
}
int intel_uc_check_file_version(struct intel_uc_fw *uc_fw, bool *old_ver)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct intel_uc_fw_file *wanted = &uc_fw->file_wanted;
struct intel_uc_fw_file *selected = &uc_fw->file_selected;
int ret;
/*
* MTL has some compatibility issues with early GuC/HuC binaries
* not working with newer ones. This is specific to MTL and we
* don't expect it to extend to other platforms.
*/
if (IS_METEORLAKE(gt->i915) && uc_fw->type == INTEL_UC_FW_TYPE_HUC) {
ret = check_mtl_huc_guc_compatibility(gt, selected);
if (ret)
return ret;
}
if (!wanted->ver.major || !selected->ver.major)
return 0;
/* Check that the file's major version is what it claimed to be */
if (selected->ver.major != wanted->ver.major) {
UNEXPECTED(gt, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
intel_uc_fw_type_repr(uc_fw->type), selected->path,
selected->ver.major, selected->ver.minor,
wanted->ver.major, wanted->ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw))
return -ENOEXEC;
} else if (old_ver) {
if (selected->ver.minor < wanted->ver.minor)
*old_ver = true;
else if ((selected->ver.minor == wanted->ver.minor) &&
(selected->ver.patch < wanted->ver.patch))
*old_ver = true;
}
return 0;
}
/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
*
* Fetch uC firmware into GEM obj.
*
* Return: 0 on success, a negative errno code on failure.
*/
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct drm_i915_private *i915 = gt->i915;
struct intel_uc_fw_file file_ideal;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
bool old_ver = false;
int err;
GEM_BUG_ON(!gt->wopcm.size);
GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
err = i915_inject_probe_error(i915, -ENXIO);
if (err)
goto fail;
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
err = try_firmware_load(uc_fw, &fw);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
/* Any error is terminal if overriding. Don't bother searching for older versions */
if (err && intel_uc_fw_is_overridden(uc_fw))
goto fail;
while (err == -ENOENT) {
old_ver = true;
__uc_fw_auto_select(i915, uc_fw);
if (!uc_fw->file_selected.path) {
/*
* No more options! But set the path back to something
* valid just in case it gets dereferenced.
*/
uc_fw->file_selected.path = file_ideal.path;
/* Also, preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
break;
}
err = try_firmware_load(uc_fw, &fw);
}
if (err)
goto fail;
err = check_fw_header(gt, fw, uc_fw);
if (err)
goto fail;
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
err = guc_check_version_range(uc_fw);
if (err)
goto fail;
}
err = intel_uc_check_file_version(uc_fw, &old_ver);
if (err)
goto fail;
if (old_ver && uc_fw->file_selected.ver.major) {
/* Preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
UNEXPECTED(gt, "%s firmware %s (%d.%d.%d) is recommended, but only %s (%d.%d.%d) was found\n",
intel_uc_fw_type_repr(uc_fw->type),
uc_fw->file_wanted.path,
uc_fw->file_wanted.ver.major,
uc_fw->file_wanted.ver.minor,
uc_fw->file_wanted.ver.patch,
uc_fw->file_selected.path,
uc_fw->file_selected.ver.major,
uc_fw->file_selected.ver.minor,
uc_fw->file_selected.ver.patch);
gt_info(gt, "Consider updating your linux-firmware pkg or downloading from %s\n",
INTEL_UC_FIRMWARE_URL);
}
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
if (!IS_ERR(obj))
obj->flags |= I915_BO_ALLOC_PM_EARLY;
} else {
obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
}
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fail;
}
uc_fw->obj = obj;
uc_fw->size = fw->size;
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
release_firmware(fw);
return 0;
fail:
intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
INTEL_UC_FIRMWARE_MISSING :
INTEL_UC_FIRMWARE_ERROR);
gt_probe_error(gt, "%s firmware %s: fetch failed %pe\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
return err;
}
static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct i915_ggtt *ggtt = gt->ggtt;
struct drm_mm_node *node = &ggtt->uc_fw;
u32 offset = uc_fw->type * INTEL_UC_RSVD_GGTT_PER_FW;
/*
* The media GT shares the GGTT with the root GT, which means that
* we need to use different offsets for the binaries on the media GT.
* To keep the math simple, we use 8MB for the root tile and 8MB for
* the media one. This will need to be updated if we ever have more
* than 1 media GT.
*/
BUILD_BUG_ON(INTEL_UC_FW_NUM_TYPES * INTEL_UC_RSVD_GGTT_PER_FW > SZ_8M);
GEM_BUG_ON(gt->type == GT_MEDIA && gt->info.id > 1);
if (gt->type == GT_MEDIA)
offset += SZ_8M;
GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
GEM_BUG_ON(offset + uc_fw->obj->base.size > node->size);
GEM_BUG_ON(uc_fw->obj->base.size > INTEL_UC_RSVD_GGTT_PER_FW);
return lower_32_bits(node->start + offset);
}
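/*
* Worked example of the offset math above (a sketch: the per-fw reservation
* size and the GuC/HuC type numbering are assumptions, not taken from this
* file). With a 2M INTEL_UC_RSVD_GGTT_PER_FW and GuC/HuC numbered 0/1:
*
*   root GT,  GuC:  node->start + 0 * 2M
*   root GT,  HuC:  node->start + 1 * 2M
*   media GT, GuC:  node->start + 8M + 0 * 2M
*   media GT, HuC:  node->start + 8M + 1 * 2M
*
* Both GTs index into the same reserved GGTT node, with the media GT bumped
* past the 8M window used by the root tile, which is what the BUILD_BUG_ON
* above guards.
*/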
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
struct i915_vma_resource *vma_res = &uc_fw->vma_res;
u32 pte_flags = 0;
if (!uc_fw->needs_ggtt_mapping)
return;
vma_res->start = uc_fw_ggtt_offset(uc_fw);
vma_res->node_size = obj->base.size;
vma_res->bi.pages = obj->mm.pages;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
/* uc_fw->obj cache domains were not controlled across suspend */
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(vma_res->bi.pages);
if (i915_gem_object_is_lmem(obj))
pte_flags |= PTE_LM;
if (ggtt->vm.raw_insert_entries)
ggtt->vm.raw_insert_entries(&ggtt->vm, vma_res,
i915_gem_get_pat_index(ggtt->vm.i915,
I915_CACHE_NONE),
pte_flags);
else
ggtt->vm.insert_entries(&ggtt->vm, vma_res,
i915_gem_get_pat_index(ggtt->vm.i915,
I915_CACHE_NONE),
pte_flags);
}
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
{
struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
struct i915_vma_resource *vma_res = &uc_fw->vma_res;
if (!vma_res->node_size)
return;
ggtt->vm.clear_range(&ggtt->vm, vma_res->start, vma_res->node_size);
}
static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct intel_uncore *uncore = gt->uncore;
u64 offset;
int ret;
ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
if (ret)
return ret;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
offset = uc_fw->vma_res.start + uc_fw->dma_start_offset;
GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
/* Set the DMA destination */
intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/*
* Set the transfer size. The header plus uCode will be copied to WOPCM
* via DMA, excluding any other components
*/
intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
sizeof(struct uc_css_header) + uc_fw->ucode_size);
/* Start the DMA */
intel_uncore_write_fw(uncore, DMA_CTRL,
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
if (ret)
gt_err(gt, "DMA for %s fw failed, DMA_CTRL=%u\n",
intel_uc_fw_type_repr(uc_fw->type),
intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
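/*
* For reference, the register sequence programmed by uc_fw_xfer() above:
*
*   DMA_ADDR_0_LOW/HIGH <- GGTT source address (vma start + dma_start_offset)
*   DMA_ADDR_1_LOW      <- dst_offset inside WOPCM
*   DMA_ADDR_1_HIGH     <- DMA_ADDRESS_SPACE_WOPCM (target the WOPCM space)
*   DMA_COPY_SIZE       <- sizeof(struct uc_css_header) + ucode_size
*   DMA_CTRL            <- dma_flags | START_DMA, then poll until START_DMA
*                          clears or the wait times out
*
* The _fw register accessors are used throughout because forcewake is taken
* explicitly around the whole sequence.
*/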
int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
gt_probe_error(gt, "Failed to load %s firmware %s %pe\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path, ERR_PTR(err));
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return err;
}
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
* @dst_offset: destination offset
* @dma_flags: flags for dma ctrl
*
* Loads uC firmware and updates internal flags.
*
* Return: 0 on success, non-zero on failure.
*/
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
int err;
/* make sure the status was cleared the last time we reset the uc */
GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
err = i915_inject_probe_error(gt->i915, -ENOEXEC);
if (err)
return err;
if (!intel_uc_fw_is_loadable(uc_fw))
return -ENOEXEC;
/* Call custom loader */
err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
if (err)
goto fail;
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
return 0;
fail:
return intel_uc_fw_mark_load_failed(uc_fw, err);
}
static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
{
/*
* The HW reads the GuC RSA from memory if the key size is > 256 bytes,
* while it reads it from the 64 RSA registers if it is smaller.
* The HuC RSA is always read from memory.
*/
return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
}
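/*
* Illustrative application of the rule above (key sizes are examples, not
* taken from this file): a GuC blob whose RSA key fits in 256 bytes is
* authenticated straight from the 64 RSA registers, a GuC blob with a larger
* key must have the RSA staged in memory, and a HuC blob always goes through
* memory regardless of key size.
*/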
static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
{
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct i915_vma *vma;
size_t copied;
void *vaddr;
int err;
err = i915_inject_probe_error(gt->i915, -ENXIO);
if (err)
return err;
if (!uc_fw_need_rsa_in_memory(uc_fw))
return 0;
/*
* uC firmwares will sit above GUC_GGTT_TOP and will not map through
* GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
* authentication from memory, as the RSA offset now falls within the
* GuC inaccessible range. We resort to perma-pinning an additional vma
* within the accessible range that only contains the RSA signature.
* The GuC HW can use this extra pinning to perform the authentication
* since its GGTT offset will be GuC accessible.
*/
GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
intel_gt_coherent_map_type(gt, vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
err = PTR_ERR(vaddr);
goto unpin_out;
}
copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
i915_gem_object_unpin_map(vma->obj);
if (copied < uc_fw->rsa_size) {
err = -ENOMEM;
goto unpin_out;
}
uc_fw->rsa_data = vma;
return 0;
unpin_out:
i915_vma_unpin_and_release(&vma, 0);
return err;
}
static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
{
i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
}
int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
{
int err;
/* this should happen before the load! */
GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
if (!intel_uc_fw_is_available(uc_fw))
return -ENOEXEC;
err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
if (err) {
gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw pin-pages failed %pe\n",
intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out;
}
err = uc_fw_rsa_data_create(uc_fw);
if (err) {
gt_dbg(__uc_fw_to_gt(uc_fw), "%s fw rsa data creation failed %pe\n",
intel_uc_fw_type_repr(uc_fw->type), ERR_PTR(err));
goto out_unpin;
}
uc_fw_bind_ggtt(uc_fw);
return 0;
out_unpin:
i915_gem_object_unpin_pages(uc_fw->obj);
out:
return err;
}
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
uc_fw_unbind_ggtt(uc_fw);
uc_fw_rsa_data_destroy(uc_fw);
if (i915_gem_object_has_pinned_pages(uc_fw->obj))
i915_gem_object_unpin_pages(uc_fw->obj);
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
void intel_uc_fw_resume_mapping(struct intel_uc_fw *uc_fw)
{
if (!intel_uc_fw_is_available(uc_fw))
return;
if (!i915_gem_object_has_pinned_pages(uc_fw->obj))
return;
uc_fw_bind_ggtt(uc_fw);
}
/**
* intel_uc_fw_cleanup_fetch - cleanup uC firmware
* @uc_fw: uC firmware
*
* Cleans up uC firmware by releasing the firmware GEM obj.
*/
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
{
if (!intel_uc_fw_is_available(uc_fw))
return;
i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
}
/**
* intel_uc_fw_copy_rsa - copy fw RSA to buffer
*
* @uc_fw: uC firmware
* @dst: dst buffer
* @max_len: max number of bytes to copy
*
* Return: number of copied bytes.
*/
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
struct intel_memory_region *mr = uc_fw->obj->mm.region;
u32 size = min_t(u32, uc_fw->rsa_size, max_len);
u32 offset = uc_fw->dma_start_offset + sizeof(struct uc_css_header) + uc_fw->ucode_size;
struct sgt_iter iter;
size_t count = 0;
int idx;
/* Called during reset handling, must be atomic [no fs_reclaim] */
GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
idx = offset >> PAGE_SHIFT;
offset = offset_in_page(offset);
if (i915_gem_object_has_struct_page(uc_fw->obj)) {
struct page *page;
for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
u32 len = min_t(u32, size, PAGE_SIZE - offset);
void *vaddr;
if (idx > 0) {
idx--;
continue;
}
vaddr = kmap_atomic(page);
memcpy(dst, vaddr + offset, len);
kunmap_atomic(vaddr);
offset = 0;
dst += len;
size -= len;
count += len;
if (!size)
break;
}
} else {
dma_addr_t addr;
for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
u32 len = min_t(u32, size, PAGE_SIZE - offset);
void __iomem *vaddr;
if (idx > 0) {
idx--;
continue;
}
vaddr = io_mapping_map_atomic_wc(&mr->iomap,
addr - mr->region.start);
memcpy_fromio(dst, vaddr + offset, len);
io_mapping_unmap_atomic(vaddr);
offset = 0;
dst += len;
size -= len;
count += len;
if (!size)
break;
}
}
return count;
}
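/*
* Blob layout assumed by the copy above, as implied by the offset
* computation (not an independent spec): starting at dma_start_offset the
* object holds [ uc_css_header ][ ucode ][ RSA signature ], so the signature
* begins at dma_start_offset + sizeof(struct uc_css_header) + ucode_size.
* Atomic kmap/io mappings are used because this path can run from reset
* handling, where fs_reclaim must be avoided.
*/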
/**
* intel_uc_fw_dump - dump information about uC firmware
* @uc_fw: uC firmware
* @p: the &drm_printer
*
* Pretty printer for uC firmware.
*/
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
bool got_wanted;
drm_printf(p, "%s firmware: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
if (uc_fw->file_selected.path != uc_fw->file_wanted.path)
drm_printf(p, "%s firmware wanted: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
if (uc_fw->file_selected.ver.major < uc_fw->file_wanted.ver.major)
got_wanted = false;
else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
(uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor))
got_wanted = false;
else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
(uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
(uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
got_wanted = false;
else
got_wanted = true;
if (!got_wanted)
drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
uc_fw->file_wanted.ver.major,
uc_fw->file_wanted.ver.minor,
uc_fw->file_wanted.ver.patch,
uc_fw->file_selected.ver.major,
uc_fw->file_selected.ver.minor,
uc_fw->file_selected.ver.patch);
else
drm_printf(p, "\tversion: found %u.%u.%u\n",
uc_fw->file_selected.ver.major,
uc_fw->file_selected.ver.minor,
uc_fw->file_selected.ver.patch);
drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_ring.h"
#include "intel_gsc_binary_headers.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
#include "i915_reg.h"
static bool gsc_is_in_reset(struct intel_uncore *uncore)
{
u32 fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fw_status) ==
HECI1_FWSTS1_CURRENT_STATE_RESET;
}
static u32 gsc_uc_get_fw_status(struct intel_uncore *uncore, bool needs_wakeref)
{
intel_wakeref_t wakeref;
u32 fw_status = 0;
if (needs_wakeref)
wakeref = intel_runtime_pm_get(uncore->rpm);
fw_status = intel_uncore_read(uncore, HECI_FWSTS(MTL_GSC_HECI1_BASE, 1));
if (needs_wakeref)
intel_runtime_pm_put(uncore->rpm, wakeref);
return fw_status;
}
bool intel_gsc_uc_fw_proxy_init_done(struct intel_gsc_uc *gsc, bool needs_wakeref)
{
return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE,
gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore,
needs_wakeref)) ==
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
int intel_gsc_uc_fw_proxy_get_status(struct intel_gsc_uc *gsc)
{
if (!(IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)))
return -ENODEV;
if (!intel_uc_fw_is_loadable(&gsc->fw))
return -ENODEV;
if (__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
return -ENOLINK;
if (!intel_gsc_uc_fw_proxy_init_done(gsc, true))
return -EAGAIN;
return 0;
}
bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
{
return gsc_uc_get_fw_status(gsc_uc_to_gt(gsc)->uncore, false) &
HECI1_FWSTS1_INIT_COMPLETE;
}
static inline u32 cpd_entry_offset(const struct intel_gsc_cpd_entry *entry)
{
return entry->offset & INTEL_GSC_CPD_ENTRY_OFFSET_MASK;
}
int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, size_t size)
{
struct intel_gsc_uc *gsc = container_of(gsc_fw, struct intel_gsc_uc, fw);
struct intel_gt *gt = gsc_uc_to_gt(gsc);
const struct intel_gsc_layout_pointers *layout = data;
const struct intel_gsc_bpdt_header *bpdt_header = NULL;
const struct intel_gsc_bpdt_entry *bpdt_entry = NULL;
const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
const struct intel_gsc_cpd_entry *cpd_entry = NULL;
const struct intel_gsc_manifest_header *manifest;
size_t min_size = sizeof(*layout);
int i;
if (size < min_size) {
gt_err(gt, "GSC FW too small! %zu < %zu\n", size, min_size);
return -ENODATA;
}
/*
* The GSC binary starts with the pointer layout, which contains the
* locations of the various partitions of the binary. The one we're
* interested in to get the version is the boot1 partition, where we can
* find a BPDT header followed by entries, one of which points to the
* RBE sub-section of the partition. From here, we can parse the CPD
* header and the following entries to find the manifest location
* (entry identified by the "RBEP.man" name), from which we can finally
* extract the version.
*
* --------------------------------------------------
* [ intel_gsc_layout_pointers ]
* [ ... ]
* [ boot1.offset >---------------------------]------o
* [ ... ] |
* -------------------------------------------------- |
* |
* -------------------------------------------------- |
* [ intel_gsc_bpdt_header ]<-----o
* --------------------------------------------------
* [ intel_gsc_bpdt_entry[] ]
* [ entry1 ]
* [ ... ]
* [ entryX ]
* [ type == GSC_RBE ]
* [ offset >-----------------------------]------o
* [ ... ] |
* -------------------------------------------------- |
* |
* -------------------------------------------------- |
* [ intel_gsc_cpd_header_v2 ]<-----o
* --------------------------------------------------
* [ intel_gsc_cpd_entry[] ]
* [ entry1 ]
* [ ... ]
* [ entryX ]
* [ "RBEP.man" ]
* [ ... ]
* [ offset >----------------------------]------o
* [ ... ] |
* -------------------------------------------------- |
* |
* -------------------------------------------------- |
* [ intel_gsc_manifest_header ]<-----o
* [ ... ]
* [ intel_gsc_version fw_version ]
* [ ... ]
* --------------------------------------------------
*/
min_size = layout->boot1.offset + layout->boot1.size;
if (size < min_size) {
gt_err(gt, "GSC FW too small for boot section! %zu < %zu\n",
size, min_size);
return -ENODATA;
}
min_size = sizeof(*bpdt_header);
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for BPDT header: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
bpdt_header = data + layout->boot1.offset;
if (bpdt_header->signature != INTEL_GSC_BPDT_HEADER_SIGNATURE) {
gt_err(gt, "invalid signature for BPDT header: 0x%08x!\n",
bpdt_header->signature);
return -EINVAL;
}
min_size += sizeof(*bpdt_entry) * bpdt_header->descriptor_count;
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for BPDT entries: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
bpdt_entry = (void *)bpdt_header + sizeof(*bpdt_header);
for (i = 0; i < bpdt_header->descriptor_count; i++, bpdt_entry++) {
if ((bpdt_entry->type & INTEL_GSC_BPDT_ENTRY_TYPE_MASK) !=
INTEL_GSC_BPDT_ENTRY_TYPE_GSC_RBE)
continue;
cpd_header = (void *)bpdt_header + bpdt_entry->sub_partition_offset;
min_size = bpdt_entry->sub_partition_offset + sizeof(*cpd_header);
break;
}
if (!cpd_header) {
gt_err(gt, "couldn't find CPD header in GSC binary!\n");
return -ENODATA;
}
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for CPD header: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
if (cpd_header->header_marker != INTEL_GSC_CPD_HEADER_MARKER) {
gt_err(gt, "invalid marker for CPD header in GSC bin: 0x%08x!\n",
cpd_header->header_marker);
return -EINVAL;
}
min_size += sizeof(*cpd_entry) * cpd_header->num_of_entries;
if (layout->boot1.size < min_size) {
gt_err(gt, "GSC FW boot section too small for CPD entries: %u < %zu\n",
layout->boot1.size, min_size);
return -ENODATA;
}
cpd_entry = (void *)cpd_header + cpd_header->header_length;
for (i = 0; i < cpd_header->num_of_entries; i++, cpd_entry++) {
if (strcmp(cpd_entry->name, "RBEP.man") == 0) {
manifest = (void *)cpd_header + cpd_entry_offset(cpd_entry);
intel_uc_fw_version_from_gsc_manifest(&gsc->release,
manifest);
gsc->security_version = manifest->security_version;
break;
}
}
return 0;
}
static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
{
u32 offset = i915_ggtt_offset(gsc->local);
u32 *cs;
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = GSC_FW_LOAD;
*cs++ = lower_32_bits(offset);
*cs++ = upper_32_bits(offset);
*cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;
intel_ring_advance(rq, cs);
return 0;
}
static int gsc_fw_load(struct intel_gsc_uc *gsc)
{
struct intel_context *ce = gsc->ce;
struct i915_request *rq;
int err;
if (!ce)
return -ENODEV;
rq = i915_request_create(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
if (ce->engine->emit_init_breadcrumb) {
err = ce->engine->emit_init_breadcrumb(rq);
if (err)
goto out_rq;
}
err = emit_gsc_fw_load(rq, gsc);
if (err)
goto out_rq;
err = ce->engine->emit_flush(rq, 0);
out_rq:
i915_request_get(rq);
if (unlikely(err))
i915_request_set_error_once(rq, err);
i915_request_add(rq);
if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
err = -ETIME;
i915_request_put(rq);
if (err)
gt_err(gsc_uc_to_gt(gsc), "Request submission for GSC load failed %pe\n",
ERR_PTR(err));
return err;
}
static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
void *src;
if (!gsc->local)
return -ENODEV;
if (gsc->local->size < gsc->fw.size)
return -ENOSPC;
src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
intel_gt_coherent_map_type(gt, gsc->fw.obj, true));
if (IS_ERR(src))
return PTR_ERR(src);
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
intel_guc_write_barrier(&gt->uc.guc);
i915_gem_object_unpin_map(gsc->fw.obj);
return 0;
}
static int gsc_fw_wait(struct intel_gt *gt)
{
return intel_wait_for_register(gt->uncore,
HECI_FWSTS(MTL_GSC_HECI1_BASE, 1),
HECI1_FWSTS1_INIT_COMPLETE,
HECI1_FWSTS1_INIT_COMPLETE,
500);
}
struct intel_gsc_mkhi_header {
u8 group_id;
#define MKHI_GROUP_ID_GFX_SRV 0x30
u8 command;
#define MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION (0x42)
u8 reserved;
u8 result;
} __packed;
struct mtl_gsc_ver_msg_in {
struct intel_gsc_mtl_header header;
struct intel_gsc_mkhi_header mkhi;
} __packed;
struct mtl_gsc_ver_msg_out {
struct intel_gsc_mtl_header header;
struct intel_gsc_mkhi_header mkhi;
u16 proj_major;
u16 compat_major;
u16 compat_minor;
u16 reserved[5];
} __packed;
#define GSC_VER_PKT_SZ SZ_4K
static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct mtl_gsc_ver_msg_in *msg_in;
struct mtl_gsc_ver_msg_out *msg_out;
struct i915_vma *vma;
u64 offset;
void *vaddr;
int err;
err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
&vma, &vaddr);
if (err) {
gt_err(gt, "failed to allocate vma for GSC version query\n");
return err;
}
offset = i915_ggtt_offset(vma);
msg_in = vaddr;
msg_out = vaddr + GSC_VER_PKT_SZ;
intel_gsc_uc_heci_cmd_emit_mtl_header(&msg_in->header,
HECI_MEADDRESS_MKHI,
sizeof(*msg_in), 0);
msg_in->mkhi.group_id = MKHI_GROUP_ID_GFX_SRV;
msg_in->mkhi.command = MKHI_GFX_SRV_GET_HOST_COMPATIBILITY_VERSION;
err = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc,
offset,
sizeof(*msg_in),
offset + GSC_VER_PKT_SZ,
GSC_VER_PKT_SZ);
if (err) {
gt_err(gt,
"failed to submit GSC request for compatibility version: %d\n",
err);
goto out_vma;
}
if (msg_out->header.message_size != sizeof(*msg_out)) {
gt_err(gt, "invalid GSC reply length %u [expected %zu], s=0x%x, f=0x%x, r=0x%x\n",
msg_out->header.message_size, sizeof(*msg_out),
msg_out->header.status, msg_out->header.flags, msg_out->mkhi.result);
err = -EPROTO;
goto out_vma;
}
gsc->fw.file_selected.ver.major = msg_out->compat_major;
gsc->fw.file_selected.ver.minor = msg_out->compat_minor;
out_vma:
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
return err;
}
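/*
* Note on the buffer used above: a single allocation of 2 * GSC_VER_PKT_SZ
* is split in half, with the request placed at the start of the vma and the
* GSC reply expected at offset GSC_VER_PKT_SZ; both halves are handed to the
* HECI packet submission by GGTT offset and size.
*/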
int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
struct intel_uc_fw *gsc_fw = &gsc->fw;
int err;
/* check current fw status */
if (intel_gsc_uc_fw_init_done(gsc)) {
if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
return -EEXIST;
}
if (!intel_uc_fw_is_loadable(gsc_fw))
return -ENOEXEC;
/* FW blob is ok, so clean the status */
intel_uc_fw_sanitize(&gsc->fw);
if (!gsc_is_in_reset(gt->uncore))
return -EIO;
err = gsc_fw_load_prepare(gsc);
if (err)
goto fail;
/*
* GSC is only killed by an FLR, so we need to trigger one on unload to
* make sure we stop it. This is because we assign a chunk of memory to
* the GSC as part of the FW load, so we need to make sure it stops
* using it when we release it to the system on driver unload. Note that
* this is not a problem of the unload per-se, because the GSC will not
* touch that memory unless there are requests for it coming from the
* driver; therefore, no accesses will happen while i915 is not loaded,
* but if we re-load the driver then the GSC might wake up and try to
* access that old memory location again.
* Given that an FLR is a very disruptive action (see the FLR function
* for details), we want to do it as the last action before releasing
* the access to the MMIO bar, which means we need to do it as part of
* the primary uncore cleanup.
* An alternative approach to the FLR would be to use a memory location
* that survives driver unload, like e.g. stolen memory, and keep the
* GSC loaded across reloads. However, this requires us to make sure we
* preserve that memory location on unload and then determine and
* reserve its offset on each subsequent load, which is not trivial, so
* it is easier to just kill everything and start fresh.
*/
intel_uncore_set_flr_on_fini(&gt->i915->uncore);
err = gsc_fw_load(gsc);
if (err)
goto fail;
err = gsc_fw_wait(gt);
if (err)
goto fail;
err = gsc_fw_query_compatibility_version(gsc);
if (err)
goto fail;
/* we only support compatibility version 1.0 at the moment */
err = intel_uc_check_file_version(gsc_fw, NULL);
if (err)
goto fail;
/* FW is not fully operational until we enable SW proxy */
intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
gt_info(gt, "Loaded GSC firmware %s (cv%u.%u, r%u.%u.%u.%u, svn %u)\n",
gsc_fw->file_selected.path,
gsc_fw->file_selected.ver.major, gsc_fw->file_selected.ver.minor,
gsc->release.major, gsc->release.minor,
gsc->release.patch, gsc->release.build,
gsc->security_version);
return 0;
fail:
return intel_uc_fw_mark_load_failed(gsc_fw, err);
}
| linux-master | drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2023 Intel Corporation
*/
#include <linux/pci.h>
#include <linux/pnp.h>
#include <drm/drm_managed.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_gmch.h"
#include "intel_pci_config.h"
static void intel_gmch_bridge_release(struct drm_device *dev, void *bridge)
{
pci_dev_put(bridge);
}
int intel_gmch_bridge_setup(struct drm_i915_private *i915)
{
int domain = pci_domain_nr(to_pci_dev(i915->drm.dev)->bus);
i915->gmch.pdev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
if (!i915->gmch.pdev) {
drm_err(&i915->drm, "bridge device not found\n");
return -EIO;
}
return drmm_add_action_or_reset(&i915->drm, intel_gmch_bridge_release,
i915->gmch.pdev);
}
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *i915)
{
int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
if (GRAPHICS_VER(i915) >= 4)
pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
if (IS_ENABLED(CONFIG_PNP) && mchbar_addr &&
pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
return 0;
/* Get some space for it */
i915->gmch.mch_res.name = "i915 MCHBAR";
i915->gmch.mch_res.flags = IORESOURCE_MEM;
ret = pci_bus_alloc_resource(i915->gmch.pdev->bus,
&i915->gmch.mch_res,
MCHBAR_SIZE, MCHBAR_SIZE,
PCIBIOS_MIN_MEM,
0, pcibios_align_resource,
i915->gmch.pdev);
if (ret) {
drm_dbg(&i915->drm, "failed bus alloc: %d\n", ret);
i915->gmch.mch_res.start = 0;
return ret;
}
if (GRAPHICS_VER(i915) >= 4)
pci_write_config_dword(i915->gmch.pdev, reg + 4,
upper_32_bits(i915->gmch.mch_res.start));
pci_write_config_dword(i915->gmch.pdev, reg,
lower_32_bits(i915->gmch.mch_res.start));
return 0;
}
/* Set up MCHBAR if possible; flag it for teardown if we had to enable it ourselves */
void intel_gmch_bar_setup(struct drm_i915_private *i915)
{
int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return;
i915->gmch.mchbar_need_disable = false;
if (IS_I915G(i915) || IS_I915GM(i915)) {
pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
enabled = temp & 1;
}
/* If it's already enabled, don't have to do anything */
if (enabled)
return;
if (intel_alloc_mchbar_resource(i915))
return;
i915->gmch.mchbar_need_disable = true;
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(i915) || IS_I915GM(i915)) {
pci_write_config_dword(i915->gmch.pdev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
}
}
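/*
* Summary of the enable flow above: read the MCHBAR enable bit (DEVEN on
* i915G/i915GM, bit 0 of the MCHBAR register otherwise); if it is already
* set there is nothing to do, otherwise allocate the MCHBAR resource and, on
* success, set the enable bit and flag it for intel_gmch_bar_teardown().
*/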
void intel_gmch_bar_teardown(struct drm_i915_private *i915)
{
int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (i915->gmch.mchbar_need_disable) {
if (IS_I915G(i915) || IS_I915GM(i915)) {
u32 deven_val;
pci_read_config_dword(i915->gmch.pdev, DEVEN,
&deven_val);
deven_val &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(i915->gmch.pdev, DEVEN,
deven_val);
} else {
u32 mchbar_val;
pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
&mchbar_val);
mchbar_val &= ~1;
pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
mchbar_val);
}
}
if (i915->gmch.mch_res.start)
release_resource(&i915->gmch.mch_res);
}
int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
drm_err(&i915->drm, "failed to read control word\n");
return -EIO;
}
if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
return 0;
if (enable_decode)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
if (pci_write_config_word(i915->gmch.pdev, reg, gmch_ctrl)) {
drm_err(&i915->drm, "failed to write control word\n");
return -EIO;
}
return 0;
}
| linux-master | drivers/gpu/drm/i915/soc/intel_gmch.c |
// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/
#include <linux/string_helpers.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "vlv_sideband.h"
struct dram_dimm_info {
u16 size;
u8 width, ranks;
};
struct dram_channel_info {
struct dram_dimm_info dimm_l, dimm_s;
u8 ranks;
bool is_16gb_dimm;
};
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
static const char *intel_dram_type_str(enum intel_dram_type type)
{
static const char * const str[] = {
DRAM_TYPE_STR(UNKNOWN),
DRAM_TYPE_STR(DDR3),
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
DRAM_TYPE_STR(LPDDR4),
};
if (type >= ARRAY_SIZE(str))
type = INTEL_DRAM_UNKNOWN;
return str[type];
}
#undef DRAM_TYPE_STR
static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv)
{
u32 tmp;
tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
dev_priv->fsb_freq = 533; /* 133*4 */
break;
case CLKCFG_FSB_800:
dev_priv->fsb_freq = 800; /* 200*4 */
break;
case CLKCFG_FSB_667:
dev_priv->fsb_freq = 667; /* 167*4 */
break;
case CLKCFG_FSB_400:
dev_priv->fsb_freq = 400; /* 100*4 */
break;
}
switch (tmp & CLKCFG_MEM_MASK) {
case CLKCFG_MEM_533:
dev_priv->mem_freq = 533;
break;
case CLKCFG_MEM_667:
dev_priv->mem_freq = 667;
break;
case CLKCFG_MEM_800:
dev_priv->mem_freq = 800;
break;
}
/* detect pineview DDR3 setting */
tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv)
{
u16 ddrpll, csipll;
ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
switch (ddrpll & 0xff) {
case 0xc:
dev_priv->mem_freq = 800;
break;
case 0x10:
dev_priv->mem_freq = 1066;
break;
case 0x14:
dev_priv->mem_freq = 1333;
break;
case 0x18:
dev_priv->mem_freq = 1600;
break;
default:
drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
ddrpll & 0xff);
dev_priv->mem_freq = 0;
break;
}
csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
break;
case 0x00e:
dev_priv->fsb_freq = 3733;
break;
case 0x010:
dev_priv->fsb_freq = 4266;
break;
case 0x012:
dev_priv->fsb_freq = 4800;
break;
case 0x014:
dev_priv->fsb_freq = 5333;
break;
case 0x016:
dev_priv->fsb_freq = 5866;
break;
case 0x018:
dev_priv->fsb_freq = 6400;
break;
default:
drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
csipll & 0x3ff);
dev_priv->fsb_freq = 0;
break;
}
}
static void chv_detect_mem_freq(struct drm_i915_private *i915)
{
u32 val;
vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK));
val = vlv_cck_read(i915, CCK_FUSE_REG);
vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK));
switch ((val >> 2) & 0x7) {
case 3:
i915->mem_freq = 2000;
break;
default:
i915->mem_freq = 1600;
break;
}
}
static void vlv_detect_mem_freq(struct drm_i915_private *i915)
{
u32 val;
vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
switch ((val >> 6) & 3) {
case 0:
case 1:
i915->mem_freq = 800;
break;
case 2:
i915->mem_freq = 1066;
break;
case 3:
i915->mem_freq = 1333;
break;
}
}
static void detect_mem_freq(struct drm_i915_private *i915)
{
if (IS_PINEVIEW(i915))
pnv_detect_mem_freq(i915);
else if (GRAPHICS_VER(i915) == 5)
ilk_detect_mem_freq(i915);
else if (IS_CHERRYVIEW(i915))
chv_detect_mem_freq(i915);
else if (IS_VALLEYVIEW(i915))
vlv_detect_mem_freq(i915);
if (i915->mem_freq)
drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
}
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
return dimm->ranks * 64 / (dimm->width ?: 1);
}
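/*
* Worked example for the device count above (illustrative DIMM geometry): a
* dual-rank DIMM built from x8 parts has 2 * 64 / 8 = 16 DRAM devices, while
* a single-rank x16 DIMM has 1 * 64 / 16 = 4. The "?: 1" only guards against
* an unpopulated DIMM reporting width 0.
*/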
/* Returns total Gb for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
return (val & SKL_DRAM_SIZE_MASK) * 8;
}
static int skl_get_dimm_width(u16 val)
{
if (skl_get_dimm_size(val) == 0)
return 0;
switch (val & SKL_DRAM_WIDTH_MASK) {
case SKL_DRAM_WIDTH_X8:
case SKL_DRAM_WIDTH_X16:
case SKL_DRAM_WIDTH_X32:
val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
return 8 << val;
default:
MISSING_CASE(val);
return 0;
}
}
static int skl_get_dimm_ranks(u16 val)
{
if (skl_get_dimm_size(val) == 0)
return 0;
val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
return val + 1;
}
/* Returns total Gb for the whole DIMM */
static int icl_get_dimm_size(u16 val)
{
return (val & ICL_DRAM_SIZE_MASK) * 8 / 2;
}
static int icl_get_dimm_width(u16 val)
{
if (icl_get_dimm_size(val) == 0)
return 0;
switch (val & ICL_DRAM_WIDTH_MASK) {
case ICL_DRAM_WIDTH_X8:
case ICL_DRAM_WIDTH_X16:
case ICL_DRAM_WIDTH_X32:
val = (val & ICL_DRAM_WIDTH_MASK) >> ICL_DRAM_WIDTH_SHIFT;
return 8 << val;
default:
MISSING_CASE(val);
return 0;
}
}
static int icl_get_dimm_ranks(u16 val)
{
if (icl_get_dimm_size(val) == 0)
return 0;
val = (val & ICL_DRAM_RANK_MASK) >> ICL_DRAM_RANK_SHIFT;
return val + 1;
}
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
/* Convert total Gb to Gb per DRAM device */
return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}
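/*
* Example of the check above (illustrative sizes): a dual-rank x8 DIMM has
* 16 devices, so 256 Gb total works out to 256 / 16 = 16 Gb per device and
* is flagged, while the same geometry at 128 Gb total (8 Gb devices) is not.
*/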
static void
skl_dram_get_dimm_info(struct drm_i915_private *i915,
struct dram_dimm_info *dimm,
int channel, char dimm_name, u16 val)
{
if (GRAPHICS_VER(i915) >= 11) {
dimm->size = icl_get_dimm_size(val);
dimm->width = icl_get_dimm_width(val);
dimm->ranks = icl_get_dimm_ranks(val);
} else {
dimm->size = skl_get_dimm_size(val);
dimm->width = skl_get_dimm_width(val);
dimm->ranks = skl_get_dimm_ranks(val);
}
drm_dbg_kms(&i915->drm,
"CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
str_yes_no(skl_is_16gb_dimm(dimm)));
}
static int
skl_dram_get_channel_info(struct drm_i915_private *i915,
struct dram_channel_info *ch,
int channel, u32 val)
{
skl_dram_get_dimm_info(i915, &ch->dimm_l,
channel, 'L', val & 0xffff);
skl_dram_get_dimm_info(i915, &ch->dimm_s,
channel, 'S', val >> 16);
if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
return -EINVAL;
}
if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
ch->ranks = 2;
else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
ch->ranks = 2;
else
ch->ranks = 1;
ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
skl_is_16gb_dimm(&ch->dimm_s);
drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
return 0;
}
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
const struct dram_channel_info *ch1)
{
return !memcmp(ch0, ch1, sizeof(*ch0)) &&
(ch0->dimm_s.size == 0 ||
!memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
static int
skl_dram_get_channels_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
struct dram_channel_info ch0 = {}, ch1 = {};
u32 val;
int ret;
val = intel_uncore_read(&i915->uncore,
SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
if (ret == 0)
dram_info->num_channels++;
val = intel_uncore_read(&i915->uncore,
SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
if (ret == 0)
dram_info->num_channels++;
if (dram_info->num_channels == 0) {
drm_info(&i915->drm, "Number of memory channels is zero\n");
return -EINVAL;
}
if (ch0.ranks == 0 && ch1.ranks == 0) {
drm_info(&i915->drm, "couldn't get memory rank information\n");
return -EINVAL;
}
dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
str_yes_no(dram_info->symmetric_memory));
return 0;
}
static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *i915)
{
u32 val;
val = intel_uncore_read(&i915->uncore,
SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
switch (val & SKL_DRAM_DDR_TYPE_MASK) {
case SKL_DRAM_DDR_TYPE_DDR3:
return INTEL_DRAM_DDR3;
case SKL_DRAM_DDR_TYPE_DDR4:
return INTEL_DRAM_DDR4;
case SKL_DRAM_DDR_TYPE_LPDDR3:
return INTEL_DRAM_LPDDR3;
case SKL_DRAM_DDR_TYPE_LPDDR4:
return INTEL_DRAM_LPDDR4;
default:
MISSING_CASE(val);
return INTEL_DRAM_UNKNOWN;
}
}
static int
skl_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
int ret;
dram_info->type = skl_get_dram_type(i915);
drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
intel_dram_type_str(dram_info->type));
ret = skl_dram_get_channels_info(i915);
if (ret)
return ret;
return 0;
}
/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
switch (val & BXT_DRAM_SIZE_MASK) {
case BXT_DRAM_SIZE_4GBIT:
return 4;
case BXT_DRAM_SIZE_6GBIT:
return 6;
case BXT_DRAM_SIZE_8GBIT:
return 8;
case BXT_DRAM_SIZE_12GBIT:
return 12;
case BXT_DRAM_SIZE_16GBIT:
return 16;
default:
MISSING_CASE(val);
return 0;
}
}
static int bxt_get_dimm_width(u32 val)
{
if (!bxt_get_dimm_size(val))
return 0;
val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
return 8 << val;
}
static int bxt_get_dimm_ranks(u32 val)
{
if (!bxt_get_dimm_size(val))
return 0;
switch (val & BXT_DRAM_RANK_MASK) {
case BXT_DRAM_RANK_SINGLE:
return 1;
case BXT_DRAM_RANK_DUAL:
return 2;
default:
MISSING_CASE(val);
return 0;
}
}
static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
if (!bxt_get_dimm_size(val))
return INTEL_DRAM_UNKNOWN;
switch (val & BXT_DRAM_TYPE_MASK) {
case BXT_DRAM_TYPE_DDR3:
return INTEL_DRAM_DDR3;
case BXT_DRAM_TYPE_LPDDR3:
return INTEL_DRAM_LPDDR3;
case BXT_DRAM_TYPE_DDR4:
return INTEL_DRAM_DDR4;
case BXT_DRAM_TYPE_LPDDR4:
return INTEL_DRAM_LPDDR4;
default:
MISSING_CASE(val);
return INTEL_DRAM_UNKNOWN;
}
}
static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
{
dimm->width = bxt_get_dimm_width(val);
dimm->ranks = bxt_get_dimm_ranks(val);
/*
* Size in register is Gb per DRAM device. Convert to total
* Gb to match the way we report this for non-LP platforms.
*/
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
}
static int bxt_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
u32 val;
u8 valid_ranks = 0;
int i;
/*
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
struct dram_dimm_info dimm;
enum intel_dram_type type;
val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
if (val == 0xFFFFFFFF)
continue;
dram_info->num_channels++;
bxt_get_dimm_info(&dimm, val);
type = bxt_get_dimm_type(val);
drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
dram_info->type != INTEL_DRAM_UNKNOWN &&
dram_info->type != type);
drm_dbg_kms(&i915->drm,
"CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
i - BXT_D_CR_DRP0_DUNIT_START,
dimm.size, dimm.width, dimm.ranks,
intel_dram_type_str(type));
if (valid_ranks == 0)
valid_ranks = dimm.ranks;
if (type != INTEL_DRAM_UNKNOWN)
dram_info->type = type;
}
if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
drm_info(&i915->drm, "couldn't get memory information\n");
return -EINVAL;
}
return 0;
}
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
u32 val = 0;
int ret;
ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
if (GRAPHICS_VER(dev_priv) == 12) {
switch (val & 0xf) {
case 0:
dram_info->type = INTEL_DRAM_DDR4;
break;
case 1:
dram_info->type = INTEL_DRAM_DDR5;
break;
case 2:
dram_info->type = INTEL_DRAM_LPDDR5;
break;
case 3:
dram_info->type = INTEL_DRAM_LPDDR4;
break;
case 4:
dram_info->type = INTEL_DRAM_DDR3;
break;
case 5:
dram_info->type = INTEL_DRAM_LPDDR3;
break;
default:
MISSING_CASE(val & 0xf);
return -EINVAL;
}
} else {
switch (val & 0xf) {
case 0:
dram_info->type = INTEL_DRAM_DDR4;
break;
case 1:
dram_info->type = INTEL_DRAM_DDR3;
break;
case 2:
dram_info->type = INTEL_DRAM_LPDDR3;
break;
case 3:
dram_info->type = INTEL_DRAM_LPDDR4;
break;
default:
MISSING_CASE(val & 0xf);
return -EINVAL;
}
}
dram_info->num_channels = (val & 0xf0) >> 4;
dram_info->num_qgv_points = (val & 0xf00) >> 8;
dram_info->num_psf_gv_points = (val & 0x3000) >> 12;
return 0;
}
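/*
* Field layout decoded from the pcode reply above, as implied by the mask
* arithmetic: bits 3:0 select the DRAM type, bits 7:4 give the number of
* channels, bits 11:8 the number of QGV points and bits 13:12 the number of
* PSF GV points.
*/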
static int gen11_get_dram_info(struct drm_i915_private *i915)
{
int ret = skl_get_dram_info(i915);
if (ret)
return ret;
return icl_pcode_read_mem_global_info(i915);
}
static int gen12_get_dram_info(struct drm_i915_private *i915)
{
i915->dram_info.wm_lv_0_adjust_needed = false;
return icl_pcode_read_mem_global_info(i915);
}
static int xelpdp_get_dram_info(struct drm_i915_private *i915)
{
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
struct dram_info *dram_info = &i915->dram_info;
switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
case 0:
dram_info->type = INTEL_DRAM_DDR4;
break;
case 1:
dram_info->type = INTEL_DRAM_DDR5;
break;
case 2:
dram_info->type = INTEL_DRAM_LPDDR5;
break;
case 3:
dram_info->type = INTEL_DRAM_LPDDR4;
break;
case 4:
dram_info->type = INTEL_DRAM_DDR3;
break;
case 5:
dram_info->type = INTEL_DRAM_LPDDR3;
break;
default:
MISSING_CASE(val);
return -EINVAL;
}
dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val);
dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
/* PSF GV points not supported in D14+ */
return 0;
}
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
int ret;
detect_mem_freq(i915);
if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
return;
/*
* Assume level 0 watermark latency adjustment is needed until proven
* otherwise, this w/a is not needed by bxt/glk.
*/
dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
if (DISPLAY_VER(i915) >= 14)
ret = xelpdp_get_dram_info(i915);
else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915);
else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915);
else if (IS_GEN9_LP(i915))
ret = bxt_get_dram_info(i915);
else
ret = skl_get_dram_info(i915);
if (ret)
return;
drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
str_yes_no(dram_info->wm_lv_0_adjust_needed));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
{
static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
static const u8 sets[4] = { 1, 1, 2, 2 };
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
sets[EDRAM_SETS_IDX(cap)];
}
void intel_dram_edram_detect(struct drm_i915_private *i915)
{
u32 edram_cap = 0;
if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
return;
edram_cap = intel_uncore_read_fw(&i915->uncore, HSW_EDRAM_CAP);
/* NB: We can't write IDICR yet because we don't have gt funcs set up */
if (!(edram_cap & EDRAM_ENABLED))
return;
/*
* The needed capability bits for size calculation are not there with
* pre gen9 so return 128MB always.
*/
if (GRAPHICS_VER(i915) < 9)
i915->edram_size_mb = 128;
else
i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
}
| linux-master | drivers/gpu/drm/i915/soc/intel_dram.c |